1 Introduction

This notebook contains analyses from the MEDAL study, assessing memory bias in remitted and depressed individuals. The pre-registration for the main analyses, as well as a preprint version of the accompanying article can be found here as well. The main aim of the current work was to examine how three groups (control, remitted, depressed) differ in terms of emotional memory dynamics in a real-life setting.

#Load packages 
renv::activate()
* Project 'Z:/medal_membias' loaded. [renv 0.15.5]
library(dplyr)

Attaching package: ‘dplyr’

The following objects are masked from ‘package:stats’:

    filter, lag

The following objects are masked from ‘package:base’:

    intersect, setdiff, setequal, union
library(janitor)

Attaching package: ‘janitor’

The following objects are masked from ‘package:stats’:

    chisq.test, fisher.test
library(readxl)
library(data.table)
data.table 1.14.4 using 8 threads (see ?getDTthreads).  Latest news: r-datatable.com

Attaching package: ‘data.table’

The following objects are masked from ‘package:dplyr’:

    between, first, last
library(ggplot2)
library(psych)

Attaching package: ‘psych’

The following objects are masked from ‘package:ggplot2’:

    %+%, alpha
library(devtools)
Loading required package: usethis
library(pastecs)

Attaching package: ‘pastecs’

The following objects are masked from ‘package:data.table’:

    first, last

The following objects are masked from ‘package:dplyr’:

    first, last
library(cli)
library(lmerTest) #linear mixed models 
Loading required package: lme4
Loading required package: Matrix

Attaching package: ‘lmerTest’

The following object is masked from ‘package:lme4’:

    lmer

The following object is masked from ‘package:stats’:

    step
library(performance) #fits of residuals 
library(DescTools)
Registered S3 method overwritten by 'DescTools':
  method        from       
  print.palette wesanderson

Attaching package: ‘DescTools’

The following objects are masked from ‘package:psych’:

    AUC, ICC, SD

The following object is masked from ‘package:data.table’:

    %like%
library(tidyr) # Separate trigger column

Attaching package: ‘tidyr’

The following objects are masked from ‘package:Matrix’:

    expand, pack, unpack

The following object is masked from ‘package:pastecs’:

    extract
library(sjPlot) # for nice tables and interaction plots 
Learn more about sjPlot with 'browseVignettes("sjPlot")'.
library(knitr) # to print the tables within notebook
library(foreach) #run parallel loops

Attaching package: ‘foreach’

The following object is masked from ‘package:DescTools’:

    %:%
library(parallel)
library(doParallel) #run parallel loops
Loading required package: iterators
library(ggpubr) # emmeans
library(mediation) #mediation analysis 
Loading required package: MASS

Attaching package: ‘MASS’

The following object is masked from ‘package:dplyr’:

    select

Loading required package: mvtnorm
Loading required package: sandwich
mediation: Causal Mediation Analysis
Version: 4.5.0


Attaching package: ‘mediation’

The following object is masked from ‘package:psych’:

    mediate
library(plotly) #for interactive plots

Attaching package: ‘plotly’

The following object is masked from ‘package:MASS’:

    select

The following object is masked from ‘package:ggplot2’:

    last_plot

The following object is masked from ‘package:stats’:

    filter

The following object is masked from ‘package:graphics’:

    layout
library(jtools) #for theme_apa

Attaching package: ‘jtools’

The following object is masked from ‘package:DescTools’:

    %nin%
library(JSmediation) #moderated mediation
library(wesanderson) 
source("functions.R")

2 Data

Here we load in our data and preprocess it in steps. We first need to separate the different surveys that were delivered and align them to the same sampling times, merge in some descriptives we need as covariates, and estimate subject-centered and lagged variables for our models later.

  1. Load and clean data
# Load the raw EMA export and tidy its identifier columns.
MEDAL_ESM_compleet <- read_excel("data/MEDAL_ESM_compleet.xlsx")

# Rename the Movisens identifier, normalise all column names with
# janitor::clean_names(), rename the participant number, and place
# subjectcode directly after id.
MEDAL_ESM_compleet <- MEDAL_ESM_compleet %>%
  rename(id = 'Movisens-ID') %>%
  clean_names() %>%
  rename(subjectcode = 'deelnemersnr') %>%
  relocate('subjectcode', .after = 'id')

# Working copy used for the survey-specific splits below.
df_medal_try <- MEDAL_ESM_compleet
  2. Split different EMA surveys we have
# Split the merged export into the five EMA survey types.
# Columns 1, 2, 4, 5, 7, 9 are selected by POSITION (id / trigger
# bookkeeping columns) -- NOTE(review): position-based selection is fragile;
# verify against the raw export if the sheet layout ever changes.

# Sleep EMA Qs (morning survey); column 14 presumably holds the sleep item -- confirm
df_medal_sleep = df_medal_try %>% 
  filter(form=='Slaap') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, 14)) 
# Recent EMA Qs: bookkeeping columns plus every column containing 'rec'
df_medal_recent = df_medal_try %>% 
  filter(form=='Recent') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('rec')))
# Remote EMA Qs: bookkeeping columns plus every column containing 'rem'
df_medal_remote = df_medal_try %>%
  filter(form=='Remote') %>%
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('rem')))
# Mastery EMA Qs: bookkeeping columns plus every column containing 'master'
df_medal_mastery = df_medal_try %>%
  filter(form=='Mastery') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('master')))
# Standard Qs: bookkeeping columns plus everything that is NOT a
# mastery/remote/recent item, then three more columns dropped by position
# (7, 13, 14 of the intermediate selection) -- NOTE(review): confirm these
# indices against the export.
df_medal_standard = df_medal_try %>%
  filter(form=='Standaard')  %>%
  dplyr::select(c(1, 2, 4, 5, 7, 9, (-contains('mastery') & -contains('rem') & -contains('rec')))) %>% 
  dplyr::select(-c(7,13,14))

# Merge the survey-specific frames back to one row per trigger.
# Merge order matters for the suffixes: sleep columns get "_sleep",
# standard-survey columns keep bare names, and later merges add
# "_recent"/"_remote"/"_mastery". all=T = full outer join, so triggers
# present in only one of the two frames are kept.

# merge sleep and standard
df_medal_merged = merge(df_medal_sleep, df_medal_standard, by=c('id', 'trigger_counter', 'subjectcode'), suffixes = c("_sleep", ""), all=T )

# merge to recent, remote and mastery
df_medal_merged= merge(df_medal_merged, df_medal_recent, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_recent'), all = T )
df_medal_merged = merge(df_medal_merged, df_medal_remote, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_remote'), all = T )
df_medal_merged = merge(df_medal_merged, df_medal_mastery, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_mastery'), all = T )

# Fix start dates and times.
# NOTE(review): despite the "date" name, as.ITime() keeps only the
# time-of-day component -- confirm that is intended. Also, ifelse() returns
# the underlying integer and drops the ITime class, so downstream code
# should not rely on form_start_date still printing as a time.
df_medal_merged$form_start_date <- as.character(df_medal_merged$form_start_date)
df_medal_merged$form_start_date <- as.ITime(df_medal_merged$form_start_date)
# Morning (sleep) surveys have no standard-survey start time, so fall back
# to the sleep form's start time where form_start_date is missing.
df_medal_merged = df_medal_merged %>% 
  dplyr::mutate(form_start_date = ifelse(is.na(form_start_date), form_start_date_sleep, form_start_date))
  3. Add variables on Education, Gender, and Age
# Load in previous data for demographics (also contains EMA)
load("data/IPAQ_Maeve_Workspace.RData")
# NOTE(review): row 192 is dropped by position with no explanation --
# document why (duplicate participant?) and confirm the index still holds
# if the RData file ever changes.
df_MEDAL <- df_MEDAL[-c(192), ]

# Select relevant demo info (columns 1, 3, 5:7 by position) and rename to
# the conventions used in the rest of this notebook.
df_MEDAL = df_MEDAL %>% 
  dplyr::select(1, 3, 5:7) %>% 
  rename('subjectcode' = 'subject', 'education' = 'Education3groups', 'n_episodes'='NumberEpisodes' )

# merge demo to full EMA (full outer join on subjectcode)
df_medal_merged =  merge(df_medal_merged, df_MEDAL, by = c('subjectcode'), all = T ) %>%
  clean_names() 

# Education and gender are categorical covariates.
df_medal_merged$education = as.factor(df_medal_merged$education)
df_medal_merged$gender = as.factor(df_medal_merged$gender)

# Put identifier and demographic columns at the front, in the final order:
# id, subjectcode, gender, age, education.
df_medal_merged = df_medal_merged %>% 
  relocate(c('id', 'gender', 'age', 'education'), .after = 'subjectcode') %>% 
  relocate ('age', .after = 'gender') %>% 
  relocate ('education', .after = 'age') %>% 
  relocate ('id', .before = 'subjectcode')
  4. Categorize the subjectcode into the three conditions (Depressed, Remitted, Control (Never-depressed))
# Subjectcode ranges encode group membership:
#   700-799 -> remitted, 800-899 -> depressed, >= 900 -> control.
# Any code below 700 (or missing) ends up as NA, as before.
df_medal_merged <- df_medal_merged %>%
  dplyr::mutate(condition = dplyr::case_when(
    subjectcode >= 900 ~ "control",
    subjectcode >= 800 ~ "depressed",
    subjectcode >= 700 ~ "remitted"
  )) %>%
  dplyr::relocate('condition', .after = 'subjectcode')
df_medal_merged$condition <- as.factor(df_medal_merged$condition)
  5. Calculate reaction time
df_medal_merged$rt_sleep = lubridate::as.difftime( df_medal_merged$form_finish_date_sleep - df_medal_merged$form_start_date_sleep)
  6. Separate time from when trigger was sent

# Split the 'trigger' string into four pieces (separate() uses its default
# non-alphanumeric delimiter); the last two pieces are the hour and minute
# of the scheduled beep, glued back together as trigger_time (HH:MM), and
# the original trigger label is then reconstructed.
df_medal_merged = df_medal_merged %>% 
  tidyr::separate(trigger, c('Random', 'Time', 'Trigger_Time_1', 'Trigger_Time_2'))  %>% 
  dplyr::mutate(trigger_time = paste(Trigger_Time_1, Trigger_Time_2, sep=':')) %>%#separate time from previous trigger
  dplyr::mutate(trigger = paste(Random, Time, trigger_time, sep=' ')) #bring back 'trigger' column 

#Turn numbers into time format
df_medal_merged$trigger_time <- as.ITime(df_medal_merged$trigger_time)
  7. Find the trigger number per day
# Map each beep's scheduled time to a within-day trigger number (1-7).
# The morning (sleep) survey has no trigger_time, so NA time -> trigger 1;
# the two-hour windows from 10:00 to 23:00:01 map to triggers 2-7.
# NOTE(review): times before 10:00 or at/after 23:00:01 (if any occur)
# would be left NA -- confirm none exist in the sampling scheme.
#df_medal_merged$trigger_counter[is.na(df_medal_merged$trigger_time)] <- 1

df_medal_merged$trigger_number[is.na(df_medal_merged$trigger_time)] <- 1 
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("10:00:00"))& df_medal_merged$trigger_time < (as.ITime("12:00:00"))] <- 2
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("12:00:00"))& df_medal_merged$trigger_time < (as.ITime("14:00:00"))] <- 3
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("14:00:00"))& df_medal_merged$trigger_time < (as.ITime("16:00:00"))] <- 4
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("16:00:00"))& df_medal_merged$trigger_time < (as.ITime("18:00:00"))] <- 5
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("18:00:00"))& df_medal_merged$trigger_time < (as.ITime("20:00:00"))] <- 6
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("20:00:00"))& df_medal_merged$trigger_time < (as.ITime("23:00:01"))] <- 7

# Keep the trigger bookkeeping columns together near the front.
df_medal_merged = df_medal_merged %>% 
  relocate ('trigger_number', .after = 'trigger_counter')%>% relocate('trigger_time', .before ='form_start_date') %>%
  relocate('trigger', .before ='trigger_time')
  8. Final cleaning of the dataset

#Create dataset where double morning list is removed
# The original code repeated the same mutate() five times to propagate the
# previous row's trigger_counter through runs of consecutive duplicated
# morning surveys (trigger_number == 1 preceded by trigger_number == 1).
# Iterating to a fixed point produces the same result and handles runs of
# any length, not just up to five.
# NOTE(review): as in the original, rows where the condition evaluates to
# NA (the very first row, or missing trigger_number) receive an NA
# trigger_counter from ifelse() -- confirm this is intended.
repeat {
  updated <- df_medal_merged %>%
    dplyr::mutate(trigger_counter = ifelse(trigger_number == 1 & lag(trigger_number) == 1,
                                           lag(trigger_counter), trigger_counter))
  if (identical(updated$trigger_counter, df_medal_merged$trigger_counter)) break
  df_medal_merged <- updated
}

# Now we have the merged dataframe: remove duplicated columns.
# Keep one row per (subjectcode, trigger_counter), drop the duplicated
# merge columns by POSITION (1:12, 17:60 -- fragile, verify if the column
# layout changes), and record the original row order for later re-sorting.
df_medal_clean = df_medal_merged %>%
  dplyr::group_by(subjectcode) %>% 
  distinct(trigger_counter, .keep_all = TRUE) %>% 
  dplyr::select (1:12, 17:60) %>% 
  dplyr::mutate(original_order = row_number()) %>% 
  relocate('original_order', .before = 'id')

# Order the condition factor as control, remitted, depressed (visual order
# used in all plots and as the reference coding in the models).
df_medal_clean$condition <- factor(df_medal_clean$condition,
                                   levels = c('control', 'remitted', 'depressed'),
                                   labels = c('control', 'remitted', 'depressed'))

# Numeric dummy coding of condition: control = 1, remitted = 2, depressed = 3.
df_medal_clean <- df_medal_clean %>%
  mutate(condition_dummy = case_when(condition == 'control' ~ 1,
                                     condition == 'remitted' ~ 2,
                                     condition == 'depressed' ~ 3))

df_medal_clean$condition_dummy <- as.factor(df_medal_clean$condition_dummy)

#set weekday 
# NOTE(review): strptime() returns POSIXlt; storing POSIXlt in a data frame
# is fragile -- as.POSIXct() would be safer if downstream code allows it.
df_medal_clean$form_finish_date <- as.character(df_medal_clean$form_finish_date)
df_medal_clean$form_finish_date <- strptime(df_medal_clean$form_finish_date, format = '%Y-%m-%d %H:%M:%S')

# Day of week as integer 0-6 (Sunday = 0, per strftime's '%w').
df_medal_clean$finish_date <- as.Date(df_medal_clean$form_finish_date)
df_medal_clean$weekday <- as.integer(format(df_medal_clean$finish_date, '%w'))
df_medal_clean = df_medal_clean %>% relocate('weekday', .after = 'trigger_number')

#solve duplicate issue 
# Inspect duplicated trigger numbers within a subject-day (kept for manual checking).
df_medal_duplicate = df_medal_clean %>% dplyr::group_by(subjectcode, weekday) %>% filter(duplicated(trigger_number))

  #since row 10 for subject 712 is a duplicate morning list
  # NOTE(review): hard-coded single-case fix -- revisit if the raw data changes.
  df_medal_clean <- subset(df_medal_clean, !(subjectcode == 712 & original_order == 10))
  # If two consecutive rows within a subject-day share a trigger number,
  # decrement the first of the pair.
  df_medal_clean = df_medal_clean %>% dplyr::group_by(subjectcode, weekday) %>% 
    mutate(temp = lead(trigger_number), 
           trigger_number = case_when(trigger_number == temp ~ (temp -1), TRUE ~ trigger_number)) %>%
    select(-temp)
  # Drop rows with NA weekday: the form wasn't finished, so there is no data
  # for PA, NA, or memory at those beeps.
  df_medal_clean = df_medal_clean %>% filter(!is.na(weekday))
  # Re-number rows after the removals.
  df_medal_clean$original_order <- 1:nrow(df_medal_clean) 

# Create expanded df with all potential datapoints.
# Build a scaffold with one row per subject x study day x trigger (1-7), so
# missed beeps appear as explicit NA rows -- required for the row-shift
# lagging further below to line up correctly.

df_medal_day = df_medal_clean %>% 
  dplyr::select('subjectcode', 'weekday', 'original_order') %>% 
  dplyr::mutate(weekday = as.numeric(weekday)) %>%
  dplyr::group_by(subjectcode) %>%
  dplyr:: distinct(weekday, .keep_all =T) %>% 
  dplyr::ungroup()

# Repeat each subject-day seven times and attach trigger numbers 1-7.
df_medal_day = df_medal_day %>% 
  slice(rep(1:n(), each = 7)) %>% 
  dplyr::mutate(trigger_number = rep(1:7, length.out=n())) %>% 
  dplyr::rename(order = original_order)


# Merge scaffold with main dataframe (full outer join) and restore
# chronological order via the per-day 'order' index.
df_medal_clean =  merge(df_medal_day, df_medal_clean, by=c('subjectcode', 'trigger_number', 'weekday'), all= T) 
df_medal_clean = df_medal_clean %>% mutate(order = as.numeric(order))
df_medal_clean = arrange(df_medal_clean, order)
  9. Centre, scale, and average Variables

# Variables to transform: current mood plus recent/remote memory ratings,
# for both valences.
used_vars <- c("neg_mood", "rec_mem_neg_mood", "rem_mem_neg_mood",
               "pos_mood", "rec_mem_pos_mood", "rem_mem_pos_mood")

# Per-subject centering & scaling.
# FIX: character vectors must be wrapped in all_of() inside across();
# passing `used_vars` bare is deprecated tidyselect behaviour and warns.
df_medal_clean <- df_medal_clean %>%
  dplyr::group_by(subjectcode) %>%
  # Rescale raw 0-10 ratings to 0-1 (in place), then add the person mean
  # ("_m"), person-centred ("_c"), and winsorized person-centred versions.
  dplyr::mutate(
    across(all_of(used_vars), ~ (.x / 10), .names = "{.col}"),
    across(all_of(used_vars), ~ mean(.x, na.rm = TRUE), .names = "{.col}_m"),
    across(all_of(used_vars), ~ (.x - mean(.x, na.rm = TRUE)), .names = "{.col}_c"),
    across(all_of(paste0(used_vars, "_c")),
           ~ DescTools::Winsorize(.x, na.rm = TRUE), .names = "{.col}")
  ) %>%
  ungroup() %>%
  # Shift the centred variables to be strictly positive ("_cs" columns).
  # Note: this runs after ungroup(), so the shift uses the grand minimum
  # across all subjects, not per-subject minima.
  mutate(across(all_of(paste0(used_vars, "_c")),
                ~ abs(min(.x, na.rm = TRUE)) + 1 + .x,
                .names = "{.col}s"))
  10. Lag variables
# Remote memory: day-level shift.
# With exactly 7 rows per subject-day in the expanded frame, lead(n = 7)
# pairs each beep with the SAME trigger number on the NEXT day. Note the
# variables are named *_lag but are built with lead().
# NOTE(review): order_by = subjectcode is constant within each group and so
# has no effect -- rows are assumed to already be in chronological order.

df_medal_clean = df_medal_clean %>%
  dplyr::group_by(subjectcode) %>%
  mutate(rem_mem_pos_mood_lag = lead(rem_mem_pos_mood, n=7, order_by=subjectcode),
         rem_mem_neg_mood_lag = lead(rem_mem_neg_mood, n=7, order_by=subjectcode),
         pos_mood_lag_rem = lead(pos_mood, n=7, order_by=subjectcode),
         neg_mood_lag_rem = lead(neg_mood, n=7, order_by=subjectcode), 
         rem_mem_pos_mood_cs_lag = lead(rem_mem_pos_mood_cs, n=7, order_by=subjectcode),
         rem_mem_neg_mood_cs_lag = lead(rem_mem_neg_mood_cs, n=7, order_by=subjectcode),
         pos_mood_cs_lag_rem = lead(pos_mood_cs, n=7, order_by=subjectcode),
         neg_mood_cs_lag_rem = lead(neg_mood_cs, n=7, order_by=subjectcode),
         rem_mem_pos_mood_c_lag = lead(rem_mem_pos_mood_c, n=7, order_by=subjectcode),
         rem_mem_neg_mood_c_lag = lead(rem_mem_neg_mood_c, n=7, order_by=subjectcode),
         pos_mood_c_lag_rem = lead(pos_mood_c, n=7, order_by=subjectcode),
         neg_mood_c_lag_rem = lead(neg_mood_c, n=7, order_by=subjectcode)) %>% 
  filter(!is.na(original_order)) # filter out the NA rows for the lagging of recent memory 

# Recent memory: beep-level shift. lead(n = 1) pairs each beep with the
# next beep; *_lag names again actually hold lead() values.
# NOTE(review): order_by = subjectcode is constant within each group and so
# has no effect -- rows are assumed to already be in chronological order.
df_medal_clean = df_medal_clean %>% 
  dplyr::group_by(subjectcode) %>%
  mutate(rec_mem_pos_mood_lag = lead(rec_mem_pos_mood, n=1, order_by=subjectcode),
         rec_mem_neg_mood_lag = lead(rec_mem_neg_mood, n=1, order_by=subjectcode),
         pos_mood_lag_rec = lead(pos_mood, n=1, order_by=subjectcode),
         neg_mood_lag_rec = lead(neg_mood, n=1, order_by=subjectcode),
         rec_mem_pos_mood_cs_lag = lead(rec_mem_pos_mood_cs, n=1, order_by=subjectcode),
         rec_mem_neg_mood_cs_lag = lead(rec_mem_neg_mood_cs, n=1, order_by=subjectcode),
         pos_mood_cs_lag_rec = lead(pos_mood_cs, n=1, order_by=subjectcode),
         neg_mood_cs_lag_rec = lead(neg_mood_cs, n=1, order_by=subjectcode),
         rec_mem_pos_mood_c_lag = lead(rec_mem_pos_mood_c, n=1, order_by=subjectcode),
         rec_mem_neg_mood_c_lag = lead(rec_mem_neg_mood_c, n=1, order_by=subjectcode),
         pos_mood_c_lag_rec = lead(pos_mood_c, n=1, order_by=subjectcode),
         neg_mood_c_lag_rec = lead(neg_mood_c, n=1, order_by=subjectcode)) %>% 
  ungroup()
  

Descriptives

We first plot some general population descriptives, followed by some descriptives of the scales we use in the EMA weeks and compliance rates for both positive and negative mood.

Population

Demographics Table

#Create Descriptives Tables
# One row per condition: N, age mean +/- SD, education distribution (%),
# and % female. Education levels 0/1/2 = low/middle/high; gender level 1
# presumably = female -- TODO confirm both codings against the codebook.
summary_table = df_medal_clean %>% distinct(subjectcode, .keep_all = TRUE) %>% group_by(condition) %>% summarise(
  'N' = n(),
  'Age \n M +/- SD' = paste0(round(mean(age, na.rm =T), 1), ' +/- ' , round(sd(age, na.rm =T), 1)),
  #'NA Age' = sum(is.na(age)), 
  '% Education Low' = mean(education == 0, na.rm = T)*100,
  '% Education Middle' = mean(education == 1, na.rm = T)*100,
  '% Education High' = mean(education == 2, na.rm = T)*100,
  #'NA Education' = sum(is.na(education)), 
  '% Female' = mean(gender ==1, na.rm = T)*100,
  #'NA Gender' = sum(is.na(gender))
)

#Compliance rates
# Per-subject compliance: share of non-missing beeps out of 42 scheduled
# (presumably 7 triggers x 6 days -- confirm against the study design).
df_medal_compliance_individual = df_medal_clean %>% group_by(condition, subjectcode) %>% summarise('ComplianceRate' = sum(!is.na(trigger_number))/42*100)
`summarise()` has grouped output by 'condition'. You can override using the `.groups` argument.
# How many subjects fall below 70% compliance?
length(which(df_medal_compliance_individual$ComplianceRate <70))
[1] 8
# Per-condition mean compliance rate.
df_medal_compliance = df_medal_clean %>% group_by(condition) %>% summarise('% Mean Compliance' = sum(!is.na(trigger_number))/(n_distinct(subjectcode) *42)*100) 


# Append per-condition compliance to the demographics table and render it
# as an APA-style table.
summary_table <- dplyr::left_join(summary_table, df_medal_compliance, by = 'condition')
rempsyc::nice_table(summary_table)

condition

N

Age
M +/- SD

% Education Low

% Education Middle

% Education High

% Female

% Mean Compliance

control

55

51.7 +/- 10.9

0.00

22.64

77.36

78.18

92.42

remitted

90

52.5 +/- 11.1

3.49

16.28

80.23

78.41

91.88

depressed

46

48.7 +/- 11.2

4.35

30.43

65.22

71.74

92.65

Tests

# Create a dataset with one row per subject for between-group tests.
df_medal_distinct = df_medal_clean %>% distinct(subjectcode, .keep_all = TRUE)%>% select('subjectcode', 'condition', 'age', 'education', 'gender')

#Test for differences between groups 
  # Age: one-way ANOVA via lm() (overall F-test for condition in summary()).
  df_medal_distinct %>% lm(age ~ condition, data=.) %>% summary()

Call:
lm(formula = age ~ condition, data = .)

Residuals:
    Min      1Q  Median      3Q     Max 
-26.717  -6.511   2.489   7.489  18.283 

Coefficients:
                   Estimate Std. Error t value Pr(>|t|)    
(Intercept)         51.6727     1.4876  34.736   <2e-16 ***
conditionremitted    0.8386     1.8963   0.442    0.659    
conditiondepressed  -2.9553     2.2042  -1.341    0.182    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 11.03 on 186 degrees of freedom
  (2 observations deleted due to missingness)
Multiple R-squared:  0.0192,    Adjusted R-squared:  0.008657 
F-statistic: 1.821 on 2 and 186 DF,  p-value: 0.1648
  
  # Education: chi-squared test of independence (education x condition).
  # The printed warning below suggests some expected cell counts are small.
  chisq.test(df_medal_distinct$education, df_medal_distinct$condition)
Warning in stats::chisq.test(x, y, ...) :
  Chi-squared approximation may be incorrect

    Pearson's Chi-squared test

data:  df_medal_distinct$education and df_medal_distinct$condition
X-squared = 5.8234, df = 4, p-value = 0.2127
  
  # Gender: chi-squared test of independence (gender x condition).
  chisq.test(df_medal_distinct$gender, df_medal_distinct$condition)

    Pearson's Chi-squared test

data:  df_medal_distinct$gender and df_medal_distinct$condition
X-squared = 0.84533, df = 2, p-value = 0.6553

Plot

#Create a Graph  
  # Age: boxplot per condition (panel C), rendered interactively via plotly.
  boxplot_age <- ggplot(df_medal_distinct, aes(x=condition, y=age, fill=condition)) +
    geom_boxplot() +
    labs(x = 'Condition', y = 'Age', tag = 'C' ) +
    ggtitle('Age per Condition') +
    scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
    theme_apa() 
  ggplotly(boxplot_age)
Warning: Removed 2 rows containing non-finite values (`stat_boxplot()`).
    
    # Education: grouped bar chart of education level per condition (panel A).
    # NOTE(review): geom_bar() plots COUNTS, but the y axis is labelled
    # 'Proportion' -- confirm which is intended.
    plot_educ <- ggplot(df_medal_distinct, aes(x=condition, fill = education)) +
      geom_bar(position = 'dodge') +
      labs(x='Condition', y= 'Proportion', tag = 'A' ) +
      ggtitle('Education Level per Condition') +
      scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Low' , 'Middle' , 'High'), name = 'Education')  +
      theme_apa() 
    ggplotly(plot_educ)
      
    # Gender: grouped bar chart per condition (panel B).
    # NOTE(review): geom_bar() plots COUNTS, but the y axis is labelled
    # 'Proportion' -- confirm which is intended.
    plot_gender <- ggplot(df_medal_distinct, aes(x=condition, fill = gender)) +
      geom_bar(position = 'dodge') +
      labs(x = 'Condition', y = 'Proportion', tag = 'B' ) +
      ggtitle('Gender per Condition') +
      scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Male', 'Female'), 'Gender')  +
      theme_apa() 
    ggplotly(plot_gender)
      
     # Alternative gender plot via ggstatsplot (adds a chi-square annotation).
     # ggplotly cannot render geom_label -- see the warning printed below.
     plot_gender_2 <-  ggstatsplot::ggbarstats(data = df_medal_distinct, x = gender, y = condition) +
        labs(x = 'Condition', y = 'Proportion', tag = 'B' ) +
        ggtitle('Gender per Condition') +
        scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Male', 'Female'), 'Gender')  +
        theme_apa() 
Scale for fill is already present.
Adding another scale for fill, which will replace the existing scale.
      ggplotly(plot_gender_2)
Warning in geom2trace.default(dots[[1L]][[1L]], dots[[2L]][[1L]], dots[[3L]][[1L]]) :
  geom_GeomLabel() has yet to be implemented in plotly.
  If you'd like to see this geom implemented,
  Please open an issue with your example code at
  https://github.com/ropensci/plotly/issues
# Combine the three demographic panels (A-C) into one figure.
ggpubr::ggarrange(plot_educ, plot_gender, boxplot_age, nrow=2, ncol=2)    
Warning: Removed 2 rows containing non-finite values (`stat_boxplot()`).

# Subset by compliance rates: keep only subjects with >= 60% compliance.
# NOTE(review): the exclusion threshold here is 60%, while the count
# reported above used < 70% -- confirm which cutoff was pre-registered.
compliant_subs = df_medal_compliance_individual %>% filter(ComplianceRate >= 60) 
df_medal_clean = df_medal_clean[df_medal_clean$subjectcode %in% compliant_subs$subjectcode ,]
    

Positive Mood

Current Mood

# FIX: describe.by() is deprecated in psych; describeBy() is the
# documented replacement (this also silences the printed warning).
describeBy(df_medal_clean$pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))

Distribution

# Histogram of raw positive mood, faceted by condition.
ggplot(data= df_medal_clean, aes(x=pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 12 rows containing non-finite values (`stat_bin()`).

Centred Distribution

# Histogram of person-centred positive mood, faceted by condition.
ggplot(data= df_medal_clean, aes(x=pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 12 rows containing non-finite values (`stat_bin()`).

Recent Memory

# FIX: describe.by() is deprecated in psych; use describeBy().
describeBy(df_medal_clean$rec_mem_pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))

Distribution

# Histogram of raw recent-memory positive mood, faceted by condition.
ggplot(data= df_medal_clean, aes(x=rec_mem_pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 4233 rows containing non-finite values (`stat_bin()`).

Centred Distribution

# Histogram of person-centred recent-memory positive mood.
ggplot(data= df_medal_clean, aes(x=rec_mem_pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 4233 rows containing non-finite values (`stat_bin()`).

Remote Memory

# FIX: describe.by() is deprecated in psych; use describeBy().
describeBy(df_medal_clean$rem_mem_pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))

Distribution

# Histogram of raw remote-memory positive mood, faceted by condition.
ggplot(data= df_medal_clean, aes(x=rem_mem_pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 6547 rows containing non-finite values (`stat_bin()`).

Centred Distribution

# Histogram of person-centred remote-memory positive mood.
ggplot(data= df_medal_clean, aes(x=rem_mem_pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 6547 rows containing non-finite values (`stat_bin()`).

Negative Mood

Current Mood

# FIX: describe.by() is deprecated in psych; use describeBy().
describeBy(df_medal_clean$neg_mood, group=df_medal_clean$condition)%>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))

Distribution

# Histogram of raw negative mood, faceted by condition.
ggplot(data= df_medal_clean, aes(x=neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 16 rows containing non-finite values (`stat_bin()`).

Centred Distribution

# Histogram of person-centred negative mood.
ggplot(data= df_medal_clean, aes(x=neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 16 rows containing non-finite values (`stat_bin()`).

# Extra panel: the centred-then-shifted ("_cs") version of negative mood.
ggplot(data= df_medal_clean, aes(x=neg_mood_cs)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 16 rows containing non-finite values (`stat_bin()`).

Recent Memory

# FIX: describe.by() is deprecated in psych; use describeBy().
describeBy(df_medal_clean$rec_mem_neg_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))

Distribution

# Histogram of raw recent-memory negative mood, faceted by condition.
ggplot(data= df_medal_clean, aes(x=rec_mem_neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 4233 rows containing non-finite values (`stat_bin()`).

Centred Distribution

# Histogram of person-centred recent-memory negative mood.
ggplot(data= df_medal_clean, aes(x=rec_mem_neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 4233 rows containing non-finite values (`stat_bin()`).

Remote Memory

# FIX: describe.by() is deprecated in psych; use describeBy().
describeBy(df_medal_clean$rem_mem_neg_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))

Distribution

# Histogram of raw remote-memory negative mood, faceted by condition.
ggplot(data= df_medal_clean, aes(x=rem_mem_neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 6547 rows containing non-finite values (`stat_bin()`).

Centred Distribution

# Histogram of person-centred remote-memory negative mood.
ggplot(data= df_medal_clean, aes(x=rem_mem_neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
`stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
Warning: Removed 6547 rows containing non-finite values (`stat_bin()`).

3 Main Effects

Linear mixed models (random intercepts per subject) were fitted to look at the differences in positive and negative mood ratings per condition.

3.1 Linear Mixed Models for PA and NA

# Random-intercept mixed models: mood rating predicted by condition, with
# a per-subject random intercept (lmerTest supplies the p-values).

# Positive affect
positive_model <- lmer(pos_mood ~ condition + (1 | subjectcode),
                       data = df_medal_clean)

# Negative affect
negative_model <- lmer(neg_mood ~ condition + (1 | subjectcode),
                       data = df_medal_clean)



# Regression table for both models; asis_output() makes the sjPlot HTML
# render inline in the knitted notebook.
asis_output(tab_model (positive_model, negative_model, 
                      show.se = T, show.df = T, show.aic = T, transform = NULL,  
                      show.stat = T, show.std = T, 
                      title = 'Condition Differences Mood Rating', dv.labels = c('PA Scaled',  'NA Scaled') )$knitr)
Condition Differences Mood Rating
  PA Scaled NA Scaled
Predictors Estimates std. Error std. Beta standardized std. Error CI standardized CI Statistic p df Estimates std. Error std. Beta standardized std. Error CI standardized CI Statistic p df
(Intercept) 7.49 0.17 0.56 0.08 7.15 – 7.83 0.40 – 0.71 43.56 <0.001 7365.00 1.14 0.20 -0.63 0.08 0.74 – 1.53 -0.79 – -0.47 5.65 <0.001 7361.00
condition [remitted] -1.10 0.22 -0.50 0.10 -1.53 – -0.67 -0.70 – -0.31 -5.04 <0.001 7365.00 1.42 0.26 0.58 0.10 0.92 – 1.93 0.37 – 0.78 5.53 <0.001 7361.00
condition [depressed] -2.90 0.25 -1.32 0.12 -3.40 – -2.40 -1.55 – -1.10 -11.40 <0.001 7365.00 3.68 0.30 1.49 0.12 3.10 – 4.27 1.26 – 1.73 12.32 <0.001 7361.00
Random Effects
σ2 2.12 2.12
τ00 1.57 subjectcode 2.18 subjectcode
ICC 0.43 0.51
N 189 subjectcode 189 subjectcode
Observations 7370 7366
Marginal R2 / Conditional R2 0.234 / 0.560 0.296 / 0.653
AIC 27097.145 27153.068

Plot


#create boxplot for negative and positive affect for each group with significance levels
# Reshape to long format so PA and NA can be shown as facets of one plot
combine_data <- df_medal_clean %>%
  tidyr::pivot_longer(cols = c(pos_mood, neg_mood), names_to = 'mood_type', values_to = 'mood_rating') %>%
  dplyr::mutate(mood_type = factor(mood_type, levels = c('pos_mood' , 'neg_mood')))

# Plot: boxplots per group, with pairwise significance brackets stacked above
lm_plot <- ggplot(combine_data, aes(x = condition, y = mood_rating, fill = condition)) +
  geom_boxplot() +
  labs(x = 'Group', y = 'Mood (scaled units)') +
  scale_x_discrete(labels = c('control' = 'Never Depressed', 'remitted' = 'Remitted', 'depressed' = 'Depressed')) +
  facet_wrap(~ mood_type, ncol = 3, nrow = 2,
             labeller = labeller(mood_type = c('pos_mood' = 'Positive Mood', 'neg_mood' = 'Negative Mood'))) +
  geom_signif(comparisons = list(c('control', 'remitted'), c('control', 'depressed'), c('remitted', 'depressed')),
              map_signif_level = TRUE, y_position = c(15, 13, 11)) +
  scale_fill_manual(values = wesanderson::wes_palette(n = 3, name = 'GrandBudapest2')) +
  theme_apa() +
  guides(fill = FALSE)
# Interactive version for the notebook, static PDF for the manuscript figures folder
ggplotly(lm_plot)
ggsave('figure_1_main_effects.pdf', dpi = 320, width = 12, height = 8, path = "figures/")

Follow-Up

emmeans::emmeans(positive_model, pairwise ~ condition, pbkrtest.limit = 7800, lmerTest.limit=7800)
$emmeans
 condition emmean    SE  df lower.CL upper.CL
 control     7.49 0.172 186     7.15     7.83
 remitted    6.39 0.136 186     6.12     6.65
 depressed   4.59 0.188 186     4.21     4.96

Degrees-of-freedom method: kenward-roger 
Confidence level used: 0.95 

$contrasts
 contrast             estimate    SE  df t.ratio p.value
 control - remitted        1.1 0.219 186   5.038  <.0001
 control - depressed       2.9 0.255 186  11.400  <.0001
 remitted - depressed      1.8 0.232 186   7.760  <.0001

Degrees-of-freedom method: kenward-roger 
P value adjustment: tukey method for comparing a family of 3 estimates 
emmeans::emmeans(negative_model, pairwise ~ condition, pbkrtest.limit = 7800, lmerTest.limit=7800)
$emmeans
 condition emmean    SE  df lower.CL upper.CL
 control     1.14 0.202 186    0.741     1.54
 remitted    2.56 0.159 186    2.247     2.88
 depressed   4.82 0.221 186    4.388     5.26

Degrees-of-freedom method: kenward-roger 
Confidence level used: 0.95 

$contrasts
 contrast             estimate    SE  df t.ratio p.value
 control - remitted      -1.42 0.257 186  -5.530  <.0001
 control - depressed     -3.68 0.299 186 -12.322  <.0001
 remitted - depressed    -2.26 0.272 186  -8.309  <.0001

Degrees-of-freedom method: kenward-roger 
P value adjustment: tukey method for comparing a family of 3 estimates 

4 Hypothesis 1: Independent Models

In this section we test the first models from hypothesis 1 as stated in the pre-registration. These analyses look at the relationship between recent and remote emotional memory for both positive and negative affect, while also looking at group differences in these relationships based on depression status.

# Candidate model families to compare later via AIC and residual diagnostics
model_families <- c('gauss-link', 'gauss-log', 'gauss-inv', 'Gamma-link', 'Gamma-log')
# Detect available cores for the parallel fits, leaving one free for the system
ncores <- detectCores() - 1

Positive Affect

First we run the positive affect models. For each of the recent and remote models, we check the best fitting model as stated in the pre-registration based on the AIC and the residuals. We then check whether mediation analyses are warranted, and run those. Within each tab below, we present the results and steps taken, as well as post-hoc analyses to examine the directionality of the effects.

Recent

# Model equation: recent positive-memory rating (lagged) predicting current PA,
# moderated by condition, with demographic covariates and a random slope per participant
h1_pos_recent_eq <- "pos_mood_cs ~ 1 + condition*rec_mem_pos_mood_cs_lag + gender + age + education + (1 + rec_mem_pos_mood_cs_lag|subjectcode)"

# set clusters
cl <- makeCluster(ncores)
registerDoParallel(cl)

# Fit every candidate family in parallel; fits that error are silently dropped
h1_pos_recent_models <- foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'), .errorhandling = "remove") %dopar% {
  fit_all_mods(h1_pos_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# Print the fitted candidate models into a single comparison table
asis_output( tab_model(h1_pos_recent_models,  dv.labels = names(h1_pos_recent_models),
                       show.icc = TRUE, show.aic = TRUE, transform = NULL, show.se = TRUE, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.478 0.378 3.737 – 5.219 <0.001 1.595 0.053 1.491 – 1.700 <0.001 0.191 0.007 0.177 – 0.205 <0.001 4.466 0.373 3.736 – 5.197 <0.001 1.593 0.054 1.488 – 1.698 <0.001
condition [remitted] -0.744 0.413 -1.554 – 0.067 0.072 -0.096 0.059 -0.212 – 0.020 0.104 0.012 0.008 -0.004 – 0.028 0.133 -0.819 0.399 -1.601 – -0.038 0.040 -0.113 0.058 -0.227 – 0.002 0.054
condition [depressed] -1.176 0.456 -2.071 – -0.281 0.010 -0.148 0.065 -0.275 – -0.020 0.023 0.017 0.009 0.000 – 0.035 0.047 -1.255 0.441 -2.119 – -0.391 0.004 -0.169 0.065 -0.296 – -0.043 0.009
rec mem pos mood cs lag 0.407 0.048 0.313 – 0.501 <0.001 0.057 0.007 0.044 – 0.070 <0.001 -0.008 0.001 -0.010 – -0.006 <0.001 0.407 0.047 0.315 – 0.498 <0.001 0.057 0.007 0.044 – 0.071 <0.001
gender [1] 0.014 0.048 -0.080 – 0.108 0.766 0.000 0.007 -0.012 – 0.013 0.964 0.000 0.001 -0.002 – 0.002 0.898 0.014 0.050 -0.084 – 0.113 0.777 0.000 0.007 -0.013 – 0.014 0.983
age 0.001 0.002 -0.003 – 0.004 0.750 -0.000 0.000 -0.001 – 0.000 0.963 0.000 0.000 -0.000 – 0.000 0.777 0.001 0.002 -0.003 – 0.005 0.634 0.000 0.000 -0.001 – 0.001 0.976
education [1] -0.008 0.131 -0.265 – 0.249 0.952 -0.002 0.018 -0.037 – 0.033 0.901 0.000 0.002 -0.004 – 0.005 0.862 -0.019 0.137 -0.289 – 0.250 0.888 -0.004 0.019 -0.041 – 0.033 0.837
education [2] 0.018 0.126 -0.228 – 0.265 0.884 0.001 0.017 -0.032 – 0.035 0.931 -0.000 0.002 -0.005 – 0.005 0.967 0.011 0.132 -0.247 – 0.270 0.932 0.000 0.018 -0.035 – 0.036 0.990
condition [remitted] *
rec mem pos mood cs lag
0.103 0.059 -0.012 – 0.218 0.078 0.013 0.008 -0.003 – 0.029 0.112 -0.002 0.001 -0.004 – 0.001 0.143 0.115 0.057 0.003 – 0.226 0.044 0.015 0.008 -0.001 – 0.031 0.060
condition [depressed] *
rec mem pos mood cs lag
0.159 0.065 0.032 – 0.286 0.014 0.019 0.009 0.002 – 0.037 0.033 -0.002 0.001 -0.005 – 0.000 0.065 0.171 0.063 0.048 – 0.294 0.007 0.022 0.009 0.005 – 0.040 0.013
Random Effects
σ2 1.21 1.20 1.22 0.02 0.02
τ00 2.03 subjectcode 0.04 subjectcode 0.00 subjectcode 1.87 subjectcode 0.04 subjectcode
τ11 0.04 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag 0.04 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
ICC       0.72  
N 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode
Observations 3083 3083 3083 3083 3083
Marginal R2 / Conditional R2 0.267 / NA 0.007 / NA 0.000 / NA 0.839 / 0.956 0.268 / NA
AIC 9508.589 9475.908 9495.430 9815.615 9825.603
summary(h1_pos_recent_models[[2]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: gaussian  ( log )
Formula: pos_mood_cs ~ 1 + condition * rec_mem_pos_mood_cs_lag + gender +      age + education + (1 + rec_mem_pos_mood_cs_lag | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  9475.9   9560.4  -4724.0   9447.9     3069 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-6.0701 -0.5626  0.0788  0.5944  5.5868 

Random effects:
 Groups      Name                    Variance Std.Dev. Corr 
 subjectcode (Intercept)             0.041324 0.20328       
             rec_mem_pos_mood_cs_lag 0.000798 0.02825  -1.00
 Residual                            1.204853 1.09766       
Number of obs: 3083, groups:  subjectcode, 185

Fixed effects:
                                             Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                 1.595e+00  5.319e-02  29.995   <2e-16 ***
conditionremitted                          -9.611e-02  5.903e-02  -1.628   0.1035    
conditiondepressed                         -1.476e-01  6.509e-02  -2.268   0.0233 *  
rec_mem_pos_mood_cs_lag                     5.728e-02  6.712e-03   8.533   <2e-16 ***
gender1                                     2.969e-04  6.526e-03   0.045   0.9637    
age                                        -1.163e-05  2.530e-04  -0.046   0.9633    
education1                                 -2.237e-03  1.792e-02  -0.125   0.9006    
education2                                  1.478e-03  1.718e-02   0.086   0.9314    
conditionremitted:rec_mem_pos_mood_cs_lag   1.307e-02  8.228e-03   1.589   0.1121    
conditiondepressed:rec_mem_pos_mood_cs_lag  1.930e-02  9.039e-03   2.135   0.0327 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
             (Intr) cndtnr cndtnd r_____ gendr1 age    edctn1 edctn2 cndtnr:_____
condtnrmttd  -0.739                                                              
cndtndprssd  -0.672  0.601                                                       
rc_mm_ps___  -0.894  0.809  0.734                                                
gender1      -0.110 -0.021 -0.007 -0.031                                         
age          -0.276 -0.007  0.019 -0.007  0.138                                  
education1   -0.314  0.015 -0.004 -0.010  0.075  0.035                           
education2   -0.342  0.024  0.007  0.002  0.014  0.075  0.925                    
cndtnr:_____  0.733 -0.994 -0.599 -0.816  0.022  0.007 -0.003 -0.015             
cndtnd:_____  0.664 -0.601 -0.993 -0.742  0.015 -0.004  0.013  0.005  0.605      
# Residual diagnostic plots for all candidate fits
plot_diagnostics(h1_pos_recent_models)

car::vif(h1_pos_recent_models[[2]]) #Vifs were inspected without interaction and deemed OK
                                         GVIF Df GVIF^(1/(2*Df))
condition                         5803.138897  2        8.728020
rec_mem_pos_mood_cs_lag              4.225432  1        2.055586
gender                               1.058508  1        1.028838
age                                  1.058711  1        1.028937
education                            1.079968  2        1.019419
condition:rec_mem_pos_mood_cs_lag 6079.463157  2        8.830114
Follow-up

In the follow-up we look at the pairwise differences in the slopes, as well as the individual models fitted within each group.

emmeans::emtrends(h1_pos_recent_models[[2]], pairwise ~ condition, var='rec_mem_pos_mood_cs_lag', lmerTest.limit = 3500, pbkrtest.limit = 3500 )
$emtrends
 condition rec_mem_pos_mood_cs_lag.trend      SE  df asymp.LCL asymp.UCL
 control                          0.0573 0.00671 Inf    0.0441    0.0704
 remitted                         0.0704 0.00476 Inf    0.0610    0.0797
 depressed                        0.0766 0.00606 Inf    0.0647    0.0885

Results are averaged over the levels of: gender, education 
Confidence level used: 0.95 

$contrasts
 contrast             estimate      SE  df z.ratio p.value
 control - remitted   -0.01307 0.00823 Inf  -1.589  0.2504
 control - depressed  -0.01930 0.00904 Inf  -2.135  0.0828
 remitted - depressed -0.00623 0.00770 Inf  -0.808  0.6978

Results are averaged over the levels of: gender, education 
P value adjustment: tukey method for comparing a family of 3 estimates 
#create separate dataframes per condition for the group-wise follow-up models
df_medal_clean_control <- df_medal_clean %>% filter(condition == 'control')
df_medal_clean_remitted <- df_medal_clean %>% filter(condition == 'remitted')
df_medal_clean_depressed <- df_medal_clean %>% filter(condition == 'depressed')


# Recent positive memory -> PA, fitted in the control group only
# (random intercept and random slope for the lagged memory rating)
m1_pos_rec_control <- lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education +
                             (1 + rec_mem_pos_mood_cs_lag | subjectcode),
                           data = df_medal_clean_control,
                           control = lmerControl(calc.derivs = FALSE, optimizer = 'bobyqa',
                                                 optCtrl = list(maxfun = 1e6)))
boundary (singular) fit: see help('isSingular')
# Same model as the control group, fitted in the remitted group
m1_pos_rec_remitted <- lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education +
                              (1 + rec_mem_pos_mood_cs_lag | subjectcode),
                            data = df_medal_clean_remitted,
                            control = lmerControl(calc.derivs = FALSE, optimizer = 'bobyqa',
                                                  optCtrl = list(maxfun = 1e6)))
boundary (singular) fit: see help('isSingular')
# Same model as the control group, fitted in the depressed group
m1_pos_rec_depressed <- lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education +
                               (1 + rec_mem_pos_mood_cs_lag | subjectcode),
                             data = df_medal_clean_depressed,
                             control = lmerControl(calc.derivs = FALSE, optimizer = 'bobyqa',
                                                   optCtrl = list(maxfun = 1e6)))
boundary (singular) fit: see help('isSingular')
asis_output(tab_model(m1_pos_rec_control, m1_pos_rec_remitted, m1_pos_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.41 0.39 3.65 – 5.18 <0.001 3.76 0.34 3.09 – 4.42 <0.001 3.34 0.42 2.51 – 4.17 <0.001
rec mem pos mood cs lag 0.40 0.05 0.31 – 0.50 <0.001 0.51 0.03 0.44 – 0.58 <0.001 0.56 0.04 0.48 – 0.64 <0.001
gender [1] -0.05 0.08 -0.21 – 0.10 0.515 0.00 0.08 -0.14 – 0.15 0.955 0.11 0.10 -0.09 – 0.31 0.277
age 0.00 0.00 -0.00 – 0.01 0.390 0.00 0.00 -0.00 – 0.01 0.752 -0.00 0.00 -0.01 – 0.01 0.529
education [2] 0.05 0.08 -0.11 – 0.20 0.565 -0.01 0.16 -0.33 – 0.30 0.938 0.09 0.23 -0.37 – 0.55 0.692
education [1] -0.03 0.17 -0.37 – 0.31 0.849 0.06 0.24 -0.42 – 0.54 0.802
Random Effects
σ2 0.94 1.20 1.53
τ00 2.44 subjectcode 2.33 subjectcode 0.95 subjectcode
τ11 0.05 subjectcode.rec_mem_pos_mood_cs_lag 0.04 subjectcode.rec_mem_pos_mood_cs_lag 0.02 subjectcode.rec_mem_pos_mood_cs_lag
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
N 53 subjectcode 86 subjectcode 46 subjectcode
Observations 869 1451 763
Marginal R2 / Conditional R2 0.187 / NA 0.275 / NA 0.299 / NA
AIC 2488.394 4483.637 2544.869

Remote

# Model equation: remote positive-memory rating (lagged) predicting current PA,
# moderated by condition, with covariates and a random slope per participant
h1_pos_remote_eq <- "pos_mood_cs ~ 1 +  condition*rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode)"

# set clusters
cl <- makeCluster(ncores)
registerDoParallel(cl)

# Fit every candidate family in parallel; fits that error are silently dropped
h1_pos_remote_models <- foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h1_pos_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# Print the fitted candidate models into a single comparison table
asis_output( tab_model(h1_pos_remote_models, dv.labels = names(h1_pos_remote_models),
                       show.icc = TRUE, show.aic = TRUE, transform = NULL, show.se = TRUE, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 5.225 0.747 3.759 – 6.690 <0.001 1.710 0.101 1.512 – 1.908 <0.001 0.174 0.013 0.148 – 0.200 <0.001 5.395 0.808 3.809 – 6.982 <0.001 1.741 0.113 1.519 – 1.964 <0.001
condition [remitted] -0.423 0.778 -1.950 – 1.104 0.587 -0.062 0.105 -0.269 – 0.145 0.556 0.009 0.014 -0.018 – 0.036 0.519 -0.691 0.828 -2.317 – 0.934 0.404 -0.133 0.117 -0.363 – 0.098 0.259
condition [depressed] -0.446 0.825 -2.066 – 1.174 0.589 -0.060 0.112 -0.280 – 0.159 0.590 0.009 0.015 -0.020 – 0.037 0.554 -0.663 0.885 -2.399 – 1.074 0.454 -0.083 0.125 -0.329 – 0.163 0.506
rem mem pos mood cs lag 0.349 0.109 0.134 – 0.563 0.001 0.047 0.015 0.018 – 0.076 0.001 -0.006 0.002 -0.010 – -0.002 0.001 0.308 0.117 0.079 – 0.537 0.008 0.042 0.016 0.010 – 0.074 0.010
gender [1] -0.104 0.101 -0.302 – 0.095 0.305 -0.015 0.014 -0.042 – 0.012 0.269 0.002 0.002 -0.002 – 0.006 0.288 -0.116 0.117 -0.346 – 0.115 0.325 -0.021 0.016 -0.053 – 0.010 0.187
age 0.001 0.004 -0.007 – 0.009 0.768 0.000 0.001 -0.001 – 0.001 0.789 -0.000 0.000 -0.000 – 0.000 0.775 0.003 0.005 -0.006 – 0.012 0.548 0.000 0.001 -0.001 – 0.001 0.873
education [1] 0.032 0.293 -0.543 – 0.608 0.912 0.003 0.040 -0.076 – 0.082 0.941 -0.001 0.006 -0.012 – 0.010 0.909 -0.019 0.332 -0.670 – 0.632 0.954 -0.000 0.046 -0.091 – 0.091 0.998
education [2] 0.092 0.283 -0.463 – 0.647 0.745 0.011 0.039 -0.065 – 0.087 0.770 -0.002 0.005 -0.012 – 0.009 0.752 0.053 0.320 -0.574 – 0.680 0.868 0.007 0.045 -0.080 – 0.095 0.868
condition [remitted] *
rem mem pos mood cs lag
0.088 0.131 -0.168 – 0.344 0.500 0.013 0.017 -0.022 – 0.047 0.473 -0.002 0.002 -0.006 – 0.003 0.432 0.139 0.141 -0.138 – 0.415 0.325 0.025 0.020 -0.014 – 0.063 0.207
condition [depressed] *
rem mem pos mood cs lag
0.087 0.138 -0.185 – 0.358 0.531 0.011 0.018 -0.025 – 0.047 0.541 -0.002 0.002 -0.006 – 0.003 0.508 0.132 0.150 -0.164 – 0.427 0.381 0.016 0.021 -0.025 – 0.057 0.446
Random Effects
σ2 1.33 1.33 1.37 0.02 0.02
τ00 3.96 subjectcode 0.07 subjectcode 0.00 subjectcode 6.47 subjectcode 0.13 subjectcode
τ11 0.11 subjectcode.rem_mem_pos_mood_cs_lag 0.00 subjectcode.rem_mem_pos_mood_cs_lag 0.00 subjectcode.rem_mem_pos_mood_cs_lag 0.18 subjectcode.rem_mem_pos_mood_cs_lag 0.00 subjectcode.rem_mem_pos_mood_cs_lag
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode -0.99 subjectcode -0.99 subjectcode
ICC       0.94 0.25
N 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode
Observations 774 774 774 774 774
Marginal R2 / Conditional R2 0.159 / NA 0.003 / NA 0.000 / NA 0.383 / 0.966 0.144 / 0.355
AIC 2534.494 2518.754 2523.993 2499.603 2515.410
summary(h1_pos_remote_models[[2]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: gaussian  ( log )
Formula: pos_mood_cs ~ 1 + condition * rem_mem_pos_mood_cs_lag + gender +      age + education + (1 + rem_mem_pos_mood_cs_lag | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2518.8   2583.9  -1245.4   2490.8      760 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.0396 -0.5342  0.1306  0.5981  3.4033 

Random effects:
 Groups      Name                    Variance Std.Dev. Corr 
 subjectcode (Intercept)             0.067209 0.25925       
             rem_mem_pos_mood_cs_lag 0.001777 0.04216  -1.00
 Residual                            1.334534 1.15522       
Number of obs: 774, groups:  subjectcode, 184

Fixed effects:
                                             Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                 1.7097313  0.1009030  16.944  < 2e-16 ***
conditionremitted                          -0.0621833  0.1054371  -0.590  0.55535    
conditiondepressed                         -0.0602632  0.1117087  -0.539  0.58956    
rem_mem_pos_mood_cs_lag                     0.0469370  0.0146100   3.213  0.00132 ** 
gender1                                    -0.0150032  0.0135714  -1.106  0.26894    
age                                         0.0001425  0.0005332   0.267  0.78924    
education1                                  0.0029878  0.0400795   0.075  0.94058    
education2                                  0.0112801  0.0386343   0.292  0.77031    
conditionremitted:rem_mem_pos_mood_cs_lag   0.0125778  0.0174997   0.719  0.47230    
conditiondepressed:rem_mem_pos_mood_cs_lag  0.0112689  0.0184428   0.611  0.54119    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
             (Intr) cndtnr cndtnd r_____ gendr1 age    edctn1 edctn2 cndtnr:_____
condtnrmttd  -0.728                                                              
cndtndprssd  -0.695  0.654                                                       
rm_mm_ps___  -0.857  0.826  0.780                                                
gender1      -0.122 -0.030 -0.022 -0.040                                         
age          -0.305 -0.011  0.023 -0.005  0.137                                  
education1   -0.370  0.016  0.011 -0.013  0.086  0.022                           
education2   -0.401  0.028  0.024  0.002  0.025  0.061  0.934                    
cndtnr:_____  0.720 -0.992 -0.651 -0.835  0.032  0.012 -0.003 -0.018             
cndtnd:_____  0.683 -0.655 -0.990 -0.792  0.033  0.001 -0.001 -0.010  0.661      
# Residual diagnostic plots for all candidate fits
plot_diagnostics(h1_pos_remote_models)

car::vif(h1_pos_remote_models[[2]]) #VIFs without interactions were fine
                                         GVIF Df GVIF^(1/(2*Df))
condition                         2534.223885  2        7.095145
rem_mem_pos_mood_cs_lag              4.998392  1        2.235708
gender                               1.066612  1        1.032769
age                                  1.066404  1        1.032668
education                            1.086302  2        1.020910
condition:rem_mem_pos_mood_cs_lag 2735.029265  2        7.231702
Follow-up
emmeans::emtrends(h1_pos_remote_models[[4]], pairwise ~ condition, var='rem_mem_pos_mood_cs_lag' )
$emtrends
 condition rem_mem_pos_mood_cs_lag.trend     SE  df asymp.LCL asymp.UCL
 control                           0.308 0.1167 Inf    0.0792     0.536
 remitted                          0.447 0.0788 Inf    0.2921     0.601
 depressed                         0.440 0.0951 Inf    0.2533     0.626

Results are averaged over the levels of: gender, education 
Confidence level used: 0.95 

$contrasts
 contrast             estimate    SE  df z.ratio p.value
 control - remitted   -0.13876 0.141 Inf  -0.986  0.5860
 control - depressed  -0.13184 0.150 Inf  -0.876  0.6554
 remitted - depressed  0.00693 0.124 Inf   0.056  0.9983

Results are averaged over the levels of: gender, education 
P value adjustment: tukey method for comparing a family of 3 estimates 

# Remote positive memory -> PA, fitted in the control group only
m1_pos_rem_control <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education +
                             (1 + rem_mem_pos_mood_cs_lag | subjectcode),
                           data = df_medal_clean_control,
                           control = lmerControl(calc.derivs = FALSE, optimizer = 'bobyqa',
                                                 optCtrl = list(maxfun = 1e6)))

# Remote positive memory -> PA, fitted in the remitted group
m1_pos_rem_remitted <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education +
                              (1 + rem_mem_pos_mood_cs_lag | subjectcode),
                            data = df_medal_clean_remitted,
                            control = lmerControl(calc.derivs = FALSE, optimizer = 'bobyqa',
                                                  optCtrl = list(maxfun = 1e6)))
boundary (singular) fit: see help('isSingular')
# Remote positive memory -> PA, fitted in the depressed group
m1_pos_rem_depressed <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education +
                               (1 + rem_mem_pos_mood_cs_lag | subjectcode),
                             data = df_medal_clean_depressed,
                             control = lmerControl(calc.derivs = FALSE, optimizer = 'bobyqa',
                                                   optCtrl = list(maxfun = 1e6)))
boundary (singular) fit: see help('isSingular')
asis_output(tab_model(m1_pos_rem_control, m1_pos_rem_remitted, m1_pos_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 5.32 0.87 3.62 – 7.03 <0.001 4.96 0.70 3.58 – 6.33 <0.001 4.55 0.78 3.01 – 6.09 <0.001
rem mem pos mood cs lag 0.33 0.13 0.08 – 0.59 0.011 0.43 0.07 0.29 – 0.57 <0.001 0.45 0.07 0.31 – 0.58 <0.001
gender [1] -0.08 0.17 -0.41 – 0.24 0.613 -0.08 0.17 -0.42 – 0.26 0.645 -0.15 0.20 -0.55 – 0.25 0.469
age 0.00 0.01 -0.01 – 0.01 0.801 0.00 0.01 -0.01 – 0.01 0.908 0.00 0.01 -0.01 – 0.02 0.763
education [2] 0.04 0.17 -0.29 – 0.37 0.792 0.00 0.40 -0.78 – 0.78 0.994 0.20 0.50 -0.79 – 1.18 0.697
education [1] -0.16 0.42 -0.99 – 0.67 0.704 0.26 0.52 -0.76 – 1.28 0.614
Random Effects
σ2 0.73 1.56 1.52
τ00 13.15 subjectcode 3.36 subjectcode 0.90 subjectcode
τ11 0.39 subjectcode.rem_mem_pos_mood_cs_lag 0.08 subjectcode.rem_mem_pos_mood_cs_lag 0.02 subjectcode.rem_mem_pos_mood_cs_lag
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
ICC 0.37    
N 53 subjectcode 85 subjectcode 46 subjectcode
Observations 225 358 191
Marginal R2 / Conditional R2 0.080 / 0.419 0.148 / NA 0.203 / NA
AIC 657.151 1232.815 658.982
NA

Negative Affect

We next run the negative affect models. For each of the recent and remote models, we check the best fitting model as stated in the pre-registration based on the AIC and the residuals. We then check whether mediation analyses are warranted, and run those. Within each tab below, we present the results and steps taken, as well as post-hoc analyses to examine the directionality of the effects.

Recent

# Model equation: recent negative-memory rating (lagged) predicting current NA,
# moderated by condition, with covariates and a random slope per participant
h1_neg_recent_eq <- "neg_mood_cs ~ condition*rec_mem_neg_mood_cs_lag  + gender + age + education + (1 + rec_mem_neg_mood_cs_lag | subjectcode)"
# set clusters
cl <- makeCluster(ncores)
registerDoParallel(cl)

# Fit every candidate family in parallel; fits that error are silently dropped
h1_neg_recent_models <- foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'), .errorhandling = 'remove') %dopar% {
  fit_all_mods(h1_neg_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# Print the fitted candidate models into a single comparison table
asis_output( tab_model(h1_neg_recent_models,  dv.labels = names(h1_neg_recent_models),
                       show.icc = TRUE, show.aic = TRUE, transform = NULL, show.se = TRUE, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.287 0.436 3.431 – 5.142 <0.001 1.548 0.069 1.414 – 1.683 <0.001 0.202 0.010 0.182 – 0.222 <0.001 4.195 0.416 3.380 – 5.010 <0.001 1.530 0.066 1.401 – 1.659 <0.001
condition [remitted] -0.836 0.471 -1.759 – 0.088 0.076 -0.137 0.074 -0.283 – 0.008 0.065 0.021 0.011 -0.000 – 0.043 0.051 -0.772 0.440 -1.636 – 0.091 0.080 -0.134 0.070 -0.271 – 0.003 0.056
condition [depressed] -1.319 0.513 -2.326 – -0.313 0.010 -0.232 0.082 -0.393 – -0.072 0.005 0.037 0.012 0.013 – 0.061 0.003 -1.233 0.474 -2.164 – -0.303 0.009 -0.227 0.076 -0.376 – -0.077 0.003
rec mem neg mood cs lag 0.321 0.061 0.202 – 0.440 <0.001 0.048 0.009 0.030 – 0.067 <0.001 -0.007 0.001 -0.010 – -0.005 <0.001 0.338 0.057 0.227 – 0.450 <0.001 0.052 0.009 0.035 – 0.070 <0.001
gender [1] 0.004 0.048 -0.091 – 0.098 0.940 -0.002 0.008 -0.017 – 0.013 0.779 0.001 0.001 -0.002 – 0.003 0.625 0.005 0.049 -0.091 – 0.101 0.922 -0.003 0.008 -0.018 – 0.013 0.751
age -0.003 0.002 -0.007 – 0.000 0.081 -0.001 0.000 -0.001 – -0.000 0.021 0.000 0.000 0.000 – 0.000 0.008 -0.003 0.002 -0.007 – 0.000 0.075 -0.001 0.000 -0.001 – -0.000 0.012
education [1] -0.005 0.132 -0.264 – 0.255 0.972 -0.001 0.021 -0.042 – 0.041 0.972 0.000 0.003 -0.007 – 0.007 0.966 -0.019 0.136 -0.285 – 0.247 0.887 -0.004 0.022 -0.048 – 0.039 0.849
education [2] -0.058 0.127 -0.306 – 0.191 0.648 -0.010 0.020 -0.050 – 0.030 0.616 0.002 0.003 -0.005 – 0.008 0.610 -0.072 0.130 -0.327 – 0.183 0.580 -0.014 0.021 -0.056 – 0.027 0.495
condition [remitted] *
rec mem neg mood cs lag
0.138 0.071 -0.002 – 0.278 0.053 0.022 0.011 0.000 – 0.044 0.045 -0.003 0.002 -0.007 – -0.000 0.034 0.127 0.067 -0.004 – 0.258 0.057 0.022 0.010 0.001 – 0.042 0.037
condition [depressed] *
rec mem neg mood cs lag
0.205 0.077 0.053 – 0.357 0.008 0.035 0.012 0.011 – 0.059 0.004 -0.005 0.002 -0.009 – -0.002 0.002 0.191 0.072 0.050 – 0.332 0.008 0.034 0.011 0.012 – 0.056 0.003
Random Effects
σ2 1.22 1.21 1.23 0.03 0.03
τ00 2.54 subjectcode 0.07 subjectcode 0.00 subjectcode 1.94 subjectcode 0.05 subjectcode
τ11 0.06 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.04 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
ICC       0.73 0.07
N 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode
Observations 3081 3081 3081 3081 3081
Marginal R2 / Conditional R2 0.240 / NA 0.008 / NA 0.000 / NA 0.777 / 0.939 0.233 / 0.284
AIC 9562.290 9531.074 9556.132 9284.286 9331.726
NA
summary(h1_neg_recent_models[[4]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: Gamma  ( identity )
Formula: neg_mood_cs ~ condition * rec_mem_neg_mood_cs_lag + gender +      age + education + (1 + rec_mem_neg_mood_cs_lag | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  9284.3   9368.7  -4628.1   9256.3     3067 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.9985 -0.5315 -0.0717  0.4158  5.9326 

Random effects:
 Groups      Name                    Variance Std.Dev. Corr 
 subjectcode (Intercept)             1.93698  1.3918        
             rec_mem_neg_mood_cs_lag 0.04333  0.2082   -1.00
 Residual                            0.03139  0.1772        
Number of obs: 3081, groups:  subjectcode, 185

Fixed effects:
                                            Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                 4.194929   0.415544  10.095  < 2e-16 ***
conditionremitted                          -0.772145   0.440460  -1.753  0.07960 .  
conditiondepressed                         -1.233353   0.474490  -2.599  0.00934 ** 
rec_mem_neg_mood_cs_lag                     0.338117   0.056874   5.945 2.76e-09 ***
gender1                                     0.004812   0.048860   0.098  0.92154    
age                                        -0.003395   0.001906  -1.781  0.07489 .  
education1                                 -0.019268   0.135743  -0.142  0.88712    
education2                                 -0.072112   0.130183  -0.554  0.57963    
conditionremitted:rec_mem_neg_mood_cs_lag   0.127339   0.066880   1.904  0.05691 .  
conditiondepressed:rec_mem_neg_mood_cs_lag  0.191051   0.071985   2.654  0.00795 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
             (Intr) cndtnr cndtnd r_____ gendr1 age    edctn1 edctn2 cndtnr:_____
condtnrmttd  -0.771                                                              
cndtndprssd  -0.725  0.672                                                       
rc_mm_ng___  -0.901  0.847  0.786                                                
gender1      -0.145  0.007  0.012  0.013                                         
age          -0.279  0.003  0.025  0.008  0.136                                  
education1   -0.316  0.009  0.015  0.002  0.077  0.035                           
education2   -0.330  0.005  0.018  0.000  0.018  0.074  0.926                    
cndtnr:_____  0.763 -0.994 -0.668 -0.850 -0.006 -0.003  0.003  0.004             
cndtnd:_____  0.713 -0.669 -0.993 -0.790 -0.005 -0.009 -0.006 -0.006  0.672      
# Residual diagnostic plots for all candidate fits
plot_diagnostics(h1_neg_recent_models)

# Multicollinearity check on the chosen Gamma identity-link model
car::vif(h1_neg_recent_models[[4]])
                                         GVIF Df GVIF^(1/(2*Df))
condition                         4642.129391  2        8.254282
rec_mem_neg_mood_cs_lag              5.275438  1        2.296832
gender                               1.056668  1        1.027944
age                                  1.058286  1        1.028730
education                            1.077266  2        1.018781
condition:rec_mem_neg_mood_cs_lag 4821.432002  2        8.332859

Follow-up

emmeans::emtrends(h1_neg_recent_models[[4]], pairwise ~ condition, var='rec_mem_neg_mood_cs_lag' )
$emtrends
 condition rec_mem_neg_mood_cs_lag.trend     SE  df asymp.LCL asymp.UCL
 control                           0.338 0.0569 Inf     0.227     0.450
 remitted                          0.465 0.0352 Inf     0.396     0.534
 depressed                         0.529 0.0441 Inf     0.443     0.616

Results are averaged over the levels of: gender, education 
Confidence level used: 0.95 

$contrasts
 contrast             estimate     SE  df z.ratio p.value
 control - remitted    -0.1273 0.0669 Inf  -1.904  0.1375
 control - depressed   -0.1911 0.0720 Inf  -2.654  0.0217
 remitted - depressed  -0.0637 0.0564 Inf  -1.129  0.4963

Results are averaged over the levels of: gender, education 
P value adjustment: tukey method for comparing a family of 3 estimates 
# Estimated marginal means per condition at the reference value of the lagged recall predictor
emmeans::emmeans(h1_neg_recent_models[[4]], identity ~ rec_mem_neg_mood_cs_lag | condition )
$emmeans
condition = control:
 rec_mem_neg_mood_cs_lag emmean     SE  df asymp.LCL asymp.UCL
                    6.59   6.22 0.0600 Inf      6.10      6.34

condition = remitted:
 rec_mem_neg_mood_cs_lag emmean     SE  df asymp.LCL asymp.UCL
                    6.59   6.29 0.0523 Inf      6.19      6.39

condition = depressed:
 rec_mem_neg_mood_cs_lag emmean     SE  df asymp.LCL asymp.UCL
                    6.59   6.25 0.0571 Inf      6.14      6.36

Results are averaged over the levels of: gender, education 
Confidence level used: 0.95 

$contrasts
condition = control:
 contrast                                estimate     SE  df z.ratio p.value
 rec_mem_neg_mood_cs_lag6.58993603267337     6.22 0.0600 Inf 103.749  <.0001

condition = remitted:
 contrast                                estimate     SE  df z.ratio p.value
 rec_mem_neg_mood_cs_lag6.58993603267337     6.29 0.0523 Inf 120.152  <.0001

condition = depressed:
 contrast                                estimate     SE  df z.ratio p.value
 rec_mem_neg_mood_cs_lag6.58993603267337     6.25 0.0571 Inf 109.472  <.0001

Results are averaged over the levels of: gender, education 

# Per-group follow-up models (Gamma, identity link), one per condition, each with
# a by-participant random slope of lagged recalled negative mood.
# calc.derivs = FALSE skips lme4's post-fit derivative check; bobyqa with a
# generous iteration budget aids convergence. (FALSE/TRUE spelled out — T/F are
# reassignable and unsafe.)
m1_neg_rec_control <- glmer(
  neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education +
    (0 + rec_mem_neg_mood_cs_lag | subjectcode),
  data = df_medal_clean_control,
  family = Gamma(link = identity),
  control = glmerControl(calc.derivs = FALSE, optimizer = "bobyqa",
                         optCtrl = list(maxfun = 1e6))
)

m1_neg_rec_remitted <- glmer(
  neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education +
    (0 + rec_mem_neg_mood_cs_lag | subjectcode),
  data = df_medal_clean_remitted,
  family = Gamma(link = identity),
  control = glmerControl(calc.derivs = FALSE, optimizer = "bobyqa",
                         optCtrl = list(maxfun = 1e6))
)

m1_neg_rec_depressed <- glmer(
  neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education +
    (0 + rec_mem_neg_mood_cs_lag | subjectcode),
  data = df_medal_clean_depressed,
  family = Gamma(link = identity),
  control = glmerControl(calc.derivs = FALSE, optimizer = "bobyqa",
                         optCtrl = list(maxfun = 1e6))
)

# Combined coefficient table (untransformed estimates, SEs, and AIC per group)
asis_output(tab_model(m1_neg_rec_control, m1_neg_rec_remitted, m1_neg_rec_depressed,
                      show.se = TRUE, show.aic = TRUE, transform = NULL,
                      dv.labels = c('control', 'remitted', 'depressed'))$knitr)
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 3.89 0.27 3.36 – 4.43 <0.001 3.49 0.32 2.87 – 4.11 <0.001 3.19 0.38 2.44 – 3.94 <0.001
rec mem neg mood cs lag 0.37 0.03 0.31 – 0.42 <0.001 0.42 0.03 0.37 – 0.47 <0.001 0.52 0.03 0.46 – 0.57 <0.001
gender [1] -0.05 0.07 -0.20 – 0.09 0.459 0.01 0.08 -0.15 – 0.18 0.875 -0.05 0.10 -0.25 – 0.15 0.605
age -0.00 0.00 -0.01 – 0.00 0.632 -0.00 0.00 -0.01 – 0.00 0.590 -0.00 0.00 -0.01 – 0.01 0.815
education [2] 0.01 0.07 -0.13 – 0.15 0.905 0.09 0.18 -0.26 – 0.45 0.614 -0.34 0.24 -0.81 – 0.13 0.158
education [1] 0.08 0.19 -0.30 – 0.46 0.677 -0.16 0.25 -0.65 – 0.33 0.529
Random Effects
σ2 0.02 0.04 0.04
τ00      
τ00      
τ11 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag
ρ01      
ρ01      
ICC   0.07  
N 53 subjectcode 86 subjectcode 46 subjectcode
Observations 868 1450 763
Marginal R2 / Conditional R2 0.902 / NA 0.886 / 0.895 0.937 / NA
AIC 2330.748 4659.061 2505.025

Remote

# Model equation: condition × lagged remote negative memory recall, with a
# by-participant random slope of the lagged recall term
h1_neg_remote_eq <- "neg_mood_cs ~ condition*rem_mem_neg_mood_cs_lag  + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode)"

# set clusters
cl <- makeCluster(ncores)
registerDoParallel(cl)

# run parallel: fit one model per distribution/link family; failing fits are
# dropped (.errorhandling = "remove") rather than aborting the whole loop
h1_neg_remote_models <- foreach(family = model_families, .combine = 'c',
                                .packages = c('lmerTest'),
                                .errorhandling = "remove") %dopar% {
  fit_all_mods(h1_neg_remote_eq, df_medal_clean, family)
}
stopCluster(cl)

# print the models into a single table (TRUE spelled out instead of T)
asis_output(tab_model(h1_neg_remote_models, dv.labels = names(h1_neg_remote_models),
                      show.icc = TRUE, show.aic = TRUE, transform = NULL,
                      show.se = TRUE, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.062 0.564 2.954 – 5.170 <0.001 1.504 0.089 1.330 – 1.678 <0.001 0.208 0.014 0.181 – 0.235 <0.001 3.944 0.575 2.815 – 5.074 <0.001 1.479 0.095 1.292 – 1.667 <0.001
condition [remitted] 0.211 0.499 -0.768 – 1.190 0.672 0.030 0.077 -0.122 – 0.181 0.700 -0.004 0.012 -0.027 – 0.019 0.737 0.190 0.483 -0.758 – 1.138 0.694 0.016 0.080 -0.141 – 0.173 0.842
condition [depressed] 0.337 0.564 -0.770 – 1.443 0.551 0.038 0.089 -0.138 – 0.214 0.670 -0.003 0.014 -0.031 – 0.024 0.822 0.367 0.536 -0.686 – 1.420 0.494 0.034 0.090 -0.142 – 0.211 0.701
rem mem neg mood cs lag 0.367 0.075 0.220 – 0.515 <0.001 0.055 0.011 0.034 – 0.077 <0.001 -0.008 0.002 -0.011 – -0.005 <0.001 0.379 0.075 0.232 – 0.527 <0.001 0.057 0.012 0.034 – 0.081 <0.001
gender [1] 0.068 0.105 -0.137 – 0.274 0.514 0.011 0.017 -0.022 – 0.044 0.508 -0.002 0.003 -0.007 – 0.003 0.494 0.035 0.114 -0.189 – 0.260 0.759 0.006 0.019 -0.030 – 0.043 0.732
age -0.000 0.004 -0.008 – 0.008 0.904 -0.000 0.001 -0.001 – 0.001 0.810 0.000 0.000 -0.000 – 0.000 0.713 0.001 0.004 -0.008 – 0.009 0.860 0.000 0.001 -0.001 – 0.002 0.885
education [1] 0.305 0.310 -0.304 – 0.913 0.326 0.047 0.050 -0.052 – 0.146 0.355 -0.007 0.008 -0.023 – 0.009 0.384 0.297 0.331 -0.353 – 0.948 0.370 0.046 0.055 -0.062 – 0.154 0.402
education [2] 0.110 0.299 -0.477 – 0.697 0.713 0.015 0.049 -0.081 – 0.111 0.752 -0.002 0.008 -0.018 – 0.014 0.786 0.129 0.318 -0.496 – 0.754 0.685 0.019 0.053 -0.085 – 0.122 0.725
condition [remitted] *
rem mem neg mood cs lag
-0.045 0.092 -0.226 – 0.137 0.629 -0.006 0.014 -0.034 – 0.021 0.651 0.001 0.002 -0.003 – 0.005 0.682 -0.045 0.092 -0.226 – 0.137 0.628 -0.004 0.015 -0.033 – 0.025 0.778
condition [depressed] *
rem mem neg mood cs lag
-0.056 0.104 -0.260 – 0.148 0.592 -0.006 0.016 -0.038 – 0.025 0.707 0.000 0.002 -0.004 – 0.005 0.861 -0.063 0.102 -0.264 – 0.138 0.537 -0.005 0.017 -0.038 – 0.027 0.748
Random Effects
σ2 1.45 1.44 1.44 0.04 0.04
τ00          
τ00          
τ11 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag
ρ01          
ρ01          
ICC 0.00     0.72 0.07
N 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode
Observations 773 773 773 773 773
Marginal R2 / Conditional R2 0.097 / 0.098 0.003 / NA 0.000 / NA 0.559 / 0.877 0.097 / 0.157
AIC 2531.522 2499.854 2502.440 2501.462 2500.894
NA
# Full summary of the selected model: index 2 is the gaussian log-link fit
summary(h1_neg_remote_models[[2]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: gaussian  ( log )
Formula: neg_mood_cs ~ condition * rem_mem_neg_mood_cs_lag + gender +      age + education + (0 + rem_mem_neg_mood_cs_lag | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2499.9   2555.7  -1237.9   2475.9      761 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.7669 -0.5588 -0.0890  0.4442  3.5675 

Random effects:
 Groups      Name                    Variance Std.Dev.
 subjectcode rem_mem_neg_mood_cs_lag 0.000    0.000   
 Residual                            1.437    1.199   
Number of obs: 773, groups:  subjectcode, 184

Fixed effects:
                                             Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                 1.5040613  0.0888436  16.929  < 2e-16 ***
conditionremitted                           0.0297090  0.0770269   0.386    0.700    
conditiondepressed                          0.0380952  0.0894826   0.426    0.670    
rem_mem_neg_mood_cs_lag                     0.0554120  0.0111490   4.970 6.69e-07 ***
gender1                                     0.0111608  0.0168517   0.662    0.508    
age                                        -0.0001571  0.0006535  -0.240    0.810    
education1                                  0.0466883  0.0504789   0.925    0.355    
education2                                  0.0154500  0.0488786   0.316    0.752    
conditionremitted:rem_mem_neg_mood_cs_lag  -0.0062889  0.0138796  -0.453    0.650    
conditiondepressed:rem_mem_neg_mood_cs_lag -0.0060319  0.0160570  -0.376    0.707    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
             (Intr) cndtnr cndtnd r_____ gendr1 age    edctn1 edctn2 cndtnr:_____
condtnrmttd  -0.572                                                              
cndtndprssd  -0.505  0.552                                                       
rm_mm_ng___  -0.681  0.783  0.674                                                
gender1      -0.223 -0.009  0.018 -0.009                                         
age          -0.439  0.011  0.046  0.010  0.138                                  
education1   -0.548  0.019  0.006 -0.007  0.098  0.025                           
education2   -0.578  0.026  0.015  0.002  0.040  0.062  0.940                    
cndtnr:_____  0.551 -0.977 -0.542 -0.804  0.012 -0.009  0.000 -0.011             
cndtnd:_____  0.470 -0.543 -0.976 -0.694 -0.002 -0.011  0.010  0.009  0.557      
# Diagnostic plots for each candidate model (project helper; presumably residual checks — confirm)
plot_diagnostics(h1_neg_remote_models)

# Multicollinearity check (generalized VIF) for the selected gaussian log-link model
car::vif(h1_neg_remote_models[[2]])
                                        GVIF Df GVIF^(1/(2*Df))
condition                         471.492059  2        4.659814
rem_mem_neg_mood_cs_lag             3.754178  1        1.937570
gender                              1.065883  1        1.032416
age                                 1.066221  1        1.032580
education                           1.087516  2        1.021195
condition:rem_mem_neg_mood_cs_lag 534.353082  2        4.807918

Follow-up

# Simple slopes of lagged remote recalled negative mood per condition, with pairwise contrasts
emmeans::emtrends(h1_neg_remote_models[[2]], pairwise ~ condition, var='rem_mem_neg_mood_cs_lag' )
$emtrends
 condition rem_mem_neg_mood_cs_lag.trend      SE  df asymp.LCL asymp.UCL
 control                          0.0554 0.01115 Inf    0.0336    0.0773
 remitted                         0.0491 0.00826 Inf    0.0329    0.0653
 depressed                        0.0494 0.01156 Inf    0.0267    0.0720

Results are averaged over the levels of: gender, education 
Confidence level used: 0.95 

$contrasts
 contrast              estimate     SE  df z.ratio p.value
 control - remitted    0.006289 0.0139 Inf   0.453  0.8930
 control - depressed   0.006032 0.0161 Inf   0.376  0.9252
 remitted - depressed -0.000257 0.0142 Inf  -0.018  0.9998

Results are averaged over the levels of: gender, education 
P value adjustment: tukey method for comparing a family of 3 estimates 

# Per-group follow-up models for remote negative memory (Gamma, identity link),
# each with a by-participant random slope of lagged recalled mood.
# FALSE/TRUE spelled out — T/F are reassignable shortcuts and unsafe.
m1_neg_rem_control <- glmer(
  neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education +
    (0 + rem_mem_neg_mood_cs_lag | subjectcode),
  data = df_medal_clean_control,
  family = Gamma(link = identity),
  control = glmerControl(calc.derivs = FALSE, optimizer = "bobyqa",
                         optCtrl = list(maxfun = 1e6))
)

m1_neg_rem_remitted <- glmer(
  neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education +
    (0 + rem_mem_neg_mood_cs_lag | subjectcode),
  data = df_medal_clean_remitted,
  family = Gamma(link = identity),
  control = glmerControl(calc.derivs = FALSE, optimizer = "bobyqa",
                         optCtrl = list(maxfun = 1e6))
)

m1_neg_rem_depressed <- glmer(
  neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education +
    (0 + rem_mem_neg_mood_cs_lag | subjectcode),
  data = df_medal_clean_depressed,
  family = Gamma(link = identity),
  control = glmerControl(calc.derivs = FALSE, optimizer = "bobyqa",
                         optCtrl = list(maxfun = 1e6))
)

# Combined coefficient table (untransformed estimates, SEs, and AIC per group)
asis_output(tab_model(m1_neg_rem_control, m1_neg_rem_remitted, m1_neg_rem_depressed,
                      show.se = TRUE, show.aic = TRUE, transform = NULL,
                      dv.labels = c('control', 'remitted', 'depressed'))$knitr)
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.30 0.49 3.34 – 5.26 <0.001 4.26 0.71 2.85 – 5.66 <0.001 3.90 0.82 2.28 – 5.52 <0.001
rem mem neg mood cs lag 0.37 0.06 0.26 – 0.48 <0.001 0.33 0.06 0.22 – 0.45 <0.001 0.32 0.08 0.17 – 0.47 <0.001
gender [1] 0.04 0.15 -0.27 – 0.34 0.815 0.10 0.20 -0.29 – 0.49 0.624 0.06 0.22 -0.37 – 0.50 0.775
age -0.00 0.01 -0.01 – 0.01 0.825 0.00 0.01 -0.01 – 0.01 0.888 0.00 0.01 -0.01 – 0.02 0.720
education [2] -0.06 0.16 -0.37 – 0.25 0.692 -0.10 0.46 -1.00 – 0.80 0.829 0.48 0.52 -0.54 – 1.50 0.351
education [1] 0.46 0.50 -0.52 – 1.44 0.354 0.34 0.53 -0.71 – 1.39 0.526
Random Effects
σ2 0.02 0.04 0.04
τ00      
τ00      
τ11 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag
ρ01      
ρ01      
ICC 0.67 0.74 0.62
N 53 subjectcode 85 subjectcode 46 subjectcode
Observations 225 357 191
Marginal R2 / Conditional R2 0.726 / 0.911 0.546 / 0.881 0.592 / 0.844
AIC 612.907 1211.096 661.114

5 Hypothesis 2: Current Affect Moderation

For H2, we want to see whether current affective states moderate the ability to recall previous affective states. That is, do current negative affect feelings moderate the relationship between affect and recall?

# Create separate dataframes for each group
df_medal_clean_control   <- df_medal_clean %>% filter(condition == 'control')
df_medal_clean_remitted  <- df_medal_clean %>% filter(condition == 'remitted')
df_medal_clean_depressed <- df_medal_clean %>% filter(condition == 'depressed')

# Create a new dataframe for the remote variables, where the participants with
# the singular datapoints are removed (%in% replaces the chained ==/| tests)
df_medal_clean_remote <- df_medal_clean %>%
  filter(!(subjectcode %in% c(822, 906, 717)))

# Create separate dataframes per condition
df_medal_clean_remote_control   <- df_medal_clean_remote %>% filter(condition == 'control')
df_medal_clean_remote_remitted  <- df_medal_clean_remote %>% filter(condition == 'remitted')
df_medal_clean_remote_depressed <- df_medal_clean_remote %>% filter(condition == 'depressed')

Positive Affect

Recent

An lmer model was fitted with gaussian identity, log, and inverse links, as well as Gamma identity, log, and inverse links. The models, however, were not a good fit, and the gaussian identity model suffered from singularity. Hence, a normal linear regression model was fitted.

# Model equation: three-way condition × lagged recalled positive mood ×
# lagged current positive mood interaction, with correlated random intercept
# and slopes per participant
h2_pos_recent_eq <- "pos_mood_cs ~ condition*rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education + (1 + rec_mem_pos_mood_cs_lag + pos_mood_cs_lag_rec|subjectcode)"

# set clusters
cl <- makeCluster(ncores)
registerDoParallel(cl)

# run parallel: one model per distribution/link family; failing fits are dropped
h2_pos_recent_models <- foreach(family = model_families, .combine = 'c',
                                .packages = c('lmerTest'),
                                .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_pos_recent_eq, df_medal_clean, family)
}
stopCluster(cl)

# print the models into a single table (TRUE spelled out instead of T)
asis_output(tab_model(h2_pos_recent_models, dv.labels = names(h2_pos_recent_models),
                      show.icc = TRUE, show.aic = TRUE, transform = NULL,
                      show.se = TRUE, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 5.153 1.038 3.117 – 7.188 <0.001 1.535 0.153 1.236 – 1.835 <0.001 0.226 0.023 0.180 – 0.271 <0.001 4.397 1.054 2.331 – 6.464 <0.001 1.473 0.150 1.180 – 1.767 <0.001
condition [remitted] -1.253 1.209 -3.624 – 1.117 0.300 -0.130 0.180 -0.482 – 0.223 0.471 0.007 0.027 -0.047 – 0.060 0.809 -0.390 1.195 -2.734 – 1.953 0.744 -0.057 0.174 -0.398 – 0.285 0.745
condition [depressed] -3.786 1.341 -6.416 – -1.156 0.005 -0.568 0.200 -0.960 – -0.176 0.005 0.071 0.030 0.012 – 0.129 0.018 -2.707 1.313 -5.281 – -0.133 0.039 -0.499 0.193 -0.877 – -0.120 0.010
rec mem pos mood cs lag 0.081 0.144 -0.202 – 0.364 0.577 0.035 0.021 -0.006 – 0.075 0.094 -0.009 0.003 -0.015 – -0.002 0.006 0.158 0.149 -0.134 – 0.450 0.288 0.039 0.021 -0.001 – 0.080 0.057
pos mood cs lag rec -0.038 0.142 -0.316 – 0.239 0.786 0.017 0.020 -0.023 – 0.056 0.414 -0.006 0.003 -0.012 – 0.000 0.052 0.063 0.146 -0.223 – 0.350 0.665 0.024 0.020 -0.016 – 0.064 0.236
gender [1] 0.016 0.047 -0.076 – 0.107 0.735 0.002 0.006 -0.011 – 0.014 0.796 -0.000 0.001 -0.002 – 0.002 0.870 0.012 0.049 -0.084 – 0.109 0.803 0.001 0.007 -0.012 – 0.015 0.832
age 0.000 0.002 -0.003 – 0.004 0.956 -0.000 0.000 -0.001 – 0.000 0.842 0.000 0.000 -0.000 – 0.000 0.715 0.001 0.002 -0.003 – 0.004 0.743 -0.000 0.000 -0.001 – 0.000 0.916
education [1] -0.015 0.128 -0.266 – 0.237 0.910 -0.003 0.018 -0.038 – 0.031 0.843 0.001 0.002 -0.004 – 0.005 0.813 -0.041 0.135 -0.305 – 0.224 0.764 -0.008 0.019 -0.044 – 0.029 0.684
education [2] 0.021 0.123 -0.220 – 0.262 0.865 0.001 0.017 -0.032 – 0.034 0.940 -0.000 0.002 -0.005 – 0.004 0.988 0.007 0.129 -0.246 – 0.261 0.956 -0.001 0.018 -0.036 – 0.034 0.964
condition [remitted] *
rec mem pos mood cs lag
0.220 0.172 -0.116 – 0.557 0.200 0.024 0.025 -0.025 – 0.072 0.341 -0.002 0.004 -0.009 – 0.006 0.657 0.121 0.174 -0.219 – 0.461 0.485 0.015 0.025 -0.034 – 0.063 0.552
condition [depressed] *
rec mem pos mood cs lag
0.564 0.192 0.186 – 0.941 0.003 0.082 0.028 0.027 – 0.136 0.003 -0.010 0.004 -0.018 – -0.002 0.014 0.443 0.193 0.064 – 0.822 0.022 0.076 0.028 0.022 – 0.130 0.006
condition [remitted] *
pos mood cs lag rec
0.073 0.167 -0.255 – 0.401 0.663 0.004 0.024 -0.043 – 0.052 0.857 0.001 0.004 -0.006 – 0.008 0.836 -0.061 0.168 -0.390 – 0.268 0.716 -0.008 0.024 -0.055 – 0.039 0.742
condition [depressed] *
pos mood cs lag rec
0.403 0.184 0.042 – 0.764 0.029 0.062 0.027 0.009 – 0.114 0.021 -0.008 0.004 -0.015 – 0.000 0.053 0.242 0.184 -0.118 – 0.603 0.187 0.051 0.026 -0.001 – 0.103 0.056
rec mem pos mood cs lag *
pos mood cs lag rec
0.036 0.019 -0.001 – 0.074 0.054 0.002 0.003 -0.003 – 0.007 0.500 0.000 0.000 -0.001 – 0.001 0.494 0.026 0.020 -0.013 – 0.065 0.196 0.001 0.003 -0.004 – 0.007 0.644
(condition [remitted]
rec mem pos mood cs lag)
pos mood cs lag rec
-0.016 0.022 -0.060 – 0.028 0.474 -0.001 0.003 -0.008 – 0.005 0.669 -0.000 0.000 -0.001 – 0.001 0.993 -0.000 0.023 -0.046 – 0.045 0.988 0.000 0.003 -0.006 – 0.006 0.958
(condition [depressed]
rec mem pos mood cs lag)
pos mood cs lag rec
-0.061 0.025 -0.110 – -0.013 0.014 -0.009 0.003 -0.016 – -0.002 0.010 0.001 0.001 0.000 – 0.002 0.033 -0.043 0.025 -0.093 – 0.007 0.091 -0.008 0.004 -0.015 – -0.001 0.023
Random Effects
σ2 1.11 1.11 1.13 0.02 0.02
τ00 2.29 subjectcode 0.05 subjectcode 0.00 subjectcode 2.44 subjectcode 0.05 subjectcode
τ11 0.04 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag 0.04 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag
0.04 subjectcode.pos_mood_cs_lag_rec 0.00 subjectcode.pos_mood_cs_lag_rec 0.00 subjectcode.pos_mood_cs_lag_rec 0.04 subjectcode.pos_mood_cs_lag_rec 0.00 subjectcode.pos_mood_cs_lag_rec
ρ01 -0.52 -0.57 -0.61 -0.54 -0.45
-0.53 -0.53 -0.53 -0.53 -0.57
ICC       0.84  
N 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode
Observations 3083 3083 3083 3083 3083
Marginal R2 / Conditional R2 0.304 / NA 0.008 / NA 0.000 / NA 0.788 / 0.966 0.310 / NA
AIC 9387.969 9326.328 9347.958 9550.520 9616.734
NA
NA

Due to singularity, this was determined to be the best-fitting model.

# Full summary of the selected model: index 2 is the gaussian log-link fit
summary(h2_pos_recent_models[[2]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: gaussian  ( log )
Formula: pos_mood_cs ~ condition * rec_mem_pos_mood_cs_lag * pos_mood_cs_lag_rec +      gender + age + education + (1 + rec_mem_pos_mood_cs_lag +  
    pos_mood_cs_lag_rec | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  9326.3   9465.1  -4640.2   9280.3     3060 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-6.2531 -0.5506  0.0778  0.5755  5.6570 

Random effects:
 Groups      Name                    Variance  Std.Dev. Corr       
 subjectcode (Intercept)             0.0484939 0.22021             
             rec_mem_pos_mood_cs_lag 0.0007739 0.02782  -0.57      
             pos_mood_cs_lag_rec     0.0006748 0.02598  -0.53 -0.39
 Residual                            1.1105812 1.05384             
Number of obs: 3083, groups:  subjectcode, 185

Fixed effects:
                                                                 Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                                     1.535e+00  1.528e-01  10.049  < 2e-16 ***
conditionremitted                                              -1.296e-01  1.797e-01  -0.721  0.47081    
conditiondepressed                                             -5.680e-01  2.001e-01  -2.839  0.00453 ** 
rec_mem_pos_mood_cs_lag                                         3.475e-02  2.076e-02   1.674  0.09406 .  
pos_mood_cs_lag_rec                                             1.660e-02  2.034e-02   0.816  0.41442    
gender1                                                         1.644e-03  6.363e-03   0.258  0.79616    
age                                                            -4.927e-05  2.465e-04  -0.200  0.84156    
education1                                                     -3.483e-03  1.756e-02  -0.198  0.84278    
education2                                                      1.269e-03  1.685e-02   0.075  0.94000    
conditionremitted:rec_mem_pos_mood_cs_lag                       2.359e-02  2.476e-02   0.953  0.34074    
conditiondepressed:rec_mem_pos_mood_cs_lag                      8.165e-02  2.773e-02   2.945  0.00323 ** 
conditionremitted:pos_mood_cs_lag_rec                           4.383e-03  2.425e-02   0.181  0.85654    
conditiondepressed:pos_mood_cs_lag_rec                          6.150e-02  2.668e-02   2.305  0.02118 *  
rec_mem_pos_mood_cs_lag:pos_mood_cs_lag_rec                     1.797e-03  2.666e-03   0.674  0.50041    
conditionremitted:rec_mem_pos_mood_cs_lag:pos_mood_cs_lag_rec  -1.357e-03  3.174e-03  -0.428  0.66897    
conditiondepressed:rec_mem_pos_mood_cs_lag:pos_mood_cs_lag_rec -8.978e-03  3.499e-03  -2.566  0.01030 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation matrix not shown by default, as p = 16 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
# Diagnostic plots for each candidate model (project helper; presumably residual checks — confirm)
plot_diagnostics(h2_pos_recent_models)

# Multicollinearity check (generalized VIF) for the selected gaussian log-link model
car::vif(h2_pos_recent_models[[2]])
                                                              GVIF Df GVIF^(1/(2*Df))
condition                                             5.368087e+05  2       27.067920
rec_mem_pos_mood_cs_lag                               4.561881e+01  1        6.754170
pos_mood_cs_lag_rec                                   4.787760e+01  1        6.919364
gender                                                1.064288e+00  1        1.031643
age                                                   1.061206e+00  1        1.030148
education                                             1.090474e+00  2        1.021889
condition:rec_mem_pos_mood_cs_lag                     5.714783e+05  2       27.494761
condition:pos_mood_cs_lag_rec                         5.985273e+05  2       27.814483
rec_mem_pos_mood_cs_lag:pos_mood_cs_lag_rec           1.209466e+02  1       10.997574
condition:rec_mem_pos_mood_cs_lag:pos_mood_cs_lag_rec 5.606392e+05  2       27.363451

Follow-up

# Per-group follow-up models for H2 (recent, positive). Fitted as plain linear
# regressions because the mixed-model variants were singular (see note above).
m2_pos_rec_control <- lm(pos_mood_cs ~ 1 + rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education,
                         data = df_medal_clean_control)

m2_pos_rec_remitted <- lm(pos_mood_cs ~ 1 + rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education,
                          data = df_medal_clean_remitted)

m2_pos_rec_depressed <- lm(pos_mood_cs ~ 1 + rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education,
                           data = df_medal_clean_depressed)

# Combined coefficient table (TRUE spelled out instead of T)
asis_output(tab_model(m2_pos_rec_control, m2_pos_rec_remitted, m2_pos_rec_depressed,
                      show.se = TRUE, show.aic = TRUE, transform = NULL,
                      dv.labels = c('control', 'remitted', 'depressed'))$knitr)
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.36 0.88 2.64 – 6.09 <0.001 3.61 0.67 2.30 – 4.92 <0.001 0.71 0.95 -1.16 – 2.58 0.458
rec mem pos mood cs lag 0.16 0.12 -0.07 – 0.40 0.177 0.36 0.09 0.19 – 0.53 <0.001 0.73 0.13 0.47 – 0.99 <0.001
pos mood cs lag rec 0.05 0.12 -0.18 – 0.29 0.651 0.07 0.09 -0.10 – 0.24 0.424 0.44 0.12 0.20 – 0.68 <0.001
gender [1] -0.07 0.08 -0.23 – 0.09 0.398 0.01 0.08 -0.14 – 0.16 0.870 0.14 0.10 -0.05 – 0.34 0.148
age 0.00 0.00 -0.00 – 0.01 0.467 0.00 0.00 -0.00 – 0.01 0.748 -0.00 0.00 -0.01 – 0.01 0.481
education [2] 0.07 0.08 -0.09 – 0.23 0.383 -0.03 0.16 -0.34 – 0.29 0.864 0.11 0.23 -0.34 – 0.57 0.624
rec mem pos mood cs lag *
pos mood cs lag rec
0.03 0.02 -0.01 – 0.06 0.118 0.01 0.01 -0.01 – 0.04 0.257 -0.03 0.02 -0.07 – -0.00 0.045
education [1] -0.03 0.17 -0.37 – 0.31 0.859 0.08 0.24 -0.39 – 0.56 0.730
Observations 869 1451 763
R2 / R2 adjusted 0.247 / 0.242 0.288 / 0.285 0.328 / 0.321
AIC 2428.646 4435.026 2483.576

Remote

An lmer model was fitted with gaussian identity, log, and inverse links, as well as Gamma identity, log, and inverse links. The models, however, were not a good fit, and the gaussian identity model suffered from singularity. Hence, a normal linear regression model was fitted.

# Model equation: three-way condition × lagged remote recalled positive mood ×
# lagged current positive mood interaction, with a random intercept per
# participant (simpler random structure than the recent model)
h2_pos_remote_eq <- "pos_mood_cs ~ condition*rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem + gender + age + education + (1 | subjectcode)"

# set clusters
cl <- makeCluster(ncores)
registerDoParallel(cl)

# run parallel: one model per distribution/link family; failing fits are dropped
h2_pos_remote_models <- foreach(family = model_families, .combine = 'c',
                                .packages = c('lmerTest'),
                                .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_pos_remote_eq, df_medal_clean, family)
}
stopCluster(cl)

# print the models into a single table (TRUE spelled out instead of T)
asis_output(tab_model(h2_pos_remote_models, dv.labels = names(h2_pos_remote_models),
                      show.icc = TRUE, show.aic = TRUE, transform = NULL,
                      show.se = TRUE, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 0.962 2.389 -3.727 – 5.652 0.687 1.205 0.291 0.633 – 1.777 <0.001 0.223 0.034 0.155 – 0.290 <0.001 0.881 2.478 -3.983 – 5.745 0.722 1.189 0.334 0.534 – 1.845 <0.001
condition [remitted] 6.916 2.976 1.075 – 12.758 0.020 0.877 0.380 0.132 – 1.623 0.021 -0.100 0.048 -0.193 – -0.006 0.036 7.110 3.013 1.194 – 13.025 0.019 0.903 0.413 0.092 – 1.714 0.029
condition [depressed] 4.329 3.051 -1.661 – 10.318 0.156 0.531 0.391 -0.236 – 1.298 0.174 -0.056 0.050 -0.153 – 0.041 0.258 4.284 3.093 -1.788 – 10.356 0.166 0.545 0.424 -0.288 – 1.377 0.199
rem mem pos mood cs lag 1.183 0.373 0.450 – 1.915 0.002 0.144 0.042 0.061 – 0.227 0.001 -0.016 0.004 -0.025 – -0.007 <0.001 1.213 0.410 0.408 – 2.018 0.003 0.148 0.052 0.045 – 0.250 0.005
pos mood cs lag rem 0.491 0.320 -0.137 – 1.119 0.125 0.059 0.040 -0.020 – 0.137 0.142 -0.006 0.005 -0.015 – 0.004 0.250 0.434 0.329 -0.212 – 1.079 0.188 0.054 0.045 -0.034 – 0.142 0.227
gender [1] -0.101 0.106 -0.308 – 0.107 0.341 -0.013 0.014 -0.041 – 0.014 0.348 0.002 0.002 -0.002 – 0.005 0.356 -0.148 0.124 -0.392 – 0.095 0.232 -0.017 0.016 -0.050 – 0.015 0.289
age 0.001 0.004 -0.007 – 0.009 0.851 0.000 0.001 -0.001 – 0.001 0.852 -0.000 0.000 -0.000 – 0.000 0.860 0.001 0.005 -0.009 – 0.010 0.897 -0.000 0.001 -0.001 – 0.001 0.994
education [1] 0.011 0.304 -0.587 – 0.608 0.972 0.002 0.041 -0.079 – 0.083 0.954 -0.000 0.006 -0.012 – 0.011 0.931 -0.069 0.350 -0.755 – 0.617 0.843 -0.005 0.047 -0.097 – 0.086 0.907
education [2] 0.068 0.293 -0.508 – 0.644 0.816 0.010 0.040 -0.068 – 0.088 0.805 -0.001 0.005 -0.012 – 0.009 0.794 0.010 0.337 -0.651 – 0.671 0.977 0.004 0.045 -0.084 – 0.093 0.923
condition [remitted] *
rem mem pos mood cs lag
-1.325 0.474 -2.255 – -0.395 0.005 -0.165 0.058 -0.278 – -0.052 0.004 0.019 0.007 0.005 – 0.032 0.007 -1.412 0.503 -2.399 – -0.425 0.005 -0.172 0.066 -0.302 – -0.043 0.009
condition [depressed] *
rem mem pos mood cs lag
-0.828 0.483 -1.777 – 0.120 0.087 -0.099 0.058 -0.214 – 0.015 0.088 0.011 0.007 -0.003 – 0.024 0.130 -0.857 0.519 -1.877 – 0.162 0.099 -0.104 0.067 -0.236 – 0.028 0.124
condition [remitted] *
pos mood cs lag rem
-0.867 0.397 -1.646 – -0.089 0.029 -0.113 0.051 -0.213 – -0.012 0.028 0.013 0.007 0.000 – 0.026 0.045 -0.820 0.399 -1.604 – -0.036 0.040 -0.109 0.055 -0.217 – -0.000 0.049
condition [depressed] *
pos mood cs lag rem
-0.565 0.406 -1.362 – 0.231 0.164 -0.071 0.053 -0.175 – 0.032 0.177 0.008 0.007 -0.006 – 0.021 0.250 -0.501 0.409 -1.304 – 0.303 0.221 -0.067 0.057 -0.178 – 0.044 0.236
rem mem pos mood cs lag *
pos mood cs lag rem
-0.097 0.050 -0.196 – 0.001 0.053 -0.011 0.006 -0.023 – 0.000 0.052 0.001 0.001 -0.000 – 0.002 0.092 -0.088 0.055 -0.195 – 0.020 0.111 -0.011 0.007 -0.024 – 0.003 0.135
(condition [remitted]
rem mem pos mood cs lag)
pos mood cs lag rem
0.170 0.063 0.046 – 0.294 0.007 0.022 0.008 0.006 – 0.037 0.006 -0.002 0.001 -0.004 – -0.001 0.009 0.169 0.066 0.038 – 0.299 0.011 0.021 0.009 0.004 – 0.039 0.015
(condition [depressed]
rem mem pos mood cs lag)
pos mood cs lag rem
0.110 0.064 -0.016 – 0.236 0.087 0.014 0.008 -0.002 – 0.029 0.087 -0.001 0.001 -0.003 – 0.000 0.125 0.104 0.069 -0.030 – 0.239 0.129 0.013 0.009 -0.005 – 0.031 0.145
Random Effects
σ2 1.48 1.45 1.45 0.03 0.03
τ00 0.00 subjectcode 0.00 subjectcode 0.00 subjectcode 0.13 subjectcode 0.00 subjectcode
ICC       0.84 0.07
N 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode
Observations 774 774 774 774 774
Marginal R2 / Conditional R2 0.162 / NA 0.004 / NA 0.000 / NA 0.678 / 0.947 0.167 / 0.225
AIC 2579.769 2522.642 2524.618 2598.960 2608.460
NA
NA

Due to singularity, a regular linear model was fitted.

# Full summary of the selected model: index 2 is the gaussian log-link fit
summary(h2_pos_remote_models[[2]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: gaussian  ( log )
Formula: pos_mood_cs ~ condition * rem_mem_pos_mood_cs_lag * pos_mood_cs_lag_rem +      gender + age + education + (1 | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2522.6   2606.4  -1243.3   2486.6      756 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.0184 -0.5312  0.1162  0.5850  4.1139 

Random effects:
 Groups      Name        Variance Std.Dev.
 subjectcode (Intercept) 0.000    0.000   
 Residual                1.451    1.205   
Number of obs: 774, groups:  subjectcode, 184

Fixed effects:
                                                                 Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                                     1.2051866  0.2914537   4.135 3.55e-05 ***
conditionremitted                                               0.8774714  0.3795533   2.312 0.020786 *  
conditiondepressed                                              0.5310702  0.3907275   1.359 0.174089    
rem_mem_pos_mood_cs_lag                                         0.1439155  0.0421589   3.414 0.000641 ***
pos_mood_cs_lag_rem                                             0.0585855  0.0398361   1.471 0.141382    
gender1                                                        -0.0132039  0.0140553  -0.939 0.347513    
age                                                             0.0001037  0.0005555   0.187 0.851963    
education1                                                      0.0023843  0.0413052   0.058 0.953968    
education2                                                      0.0098235  0.0398240   0.247 0.805161    
conditionremitted:rem_mem_pos_mood_cs_lag                      -0.1648074  0.0575540  -2.864 0.004190 ** 
conditiondepressed:rem_mem_pos_mood_cs_lag                     -0.0994787  0.0582374  -1.708 0.087607 .  
conditionremitted:pos_mood_cs_lag_rem                          -0.1127666  0.0512992  -2.198 0.027934 *  
conditiondepressed:pos_mood_cs_lag_rem                         -0.0712884  0.0527025  -1.353 0.176166    
rem_mem_pos_mood_cs_lag:pos_mood_cs_lag_rem                    -0.0114331  0.0058762  -1.946 0.051694 .  
conditionremitted:rem_mem_pos_mood_cs_lag:pos_mood_cs_lag_rem   0.0215715  0.0077950   2.767 0.005651 ** 
conditiondepressed:rem_mem_pos_mood_cs_lag:pos_mood_cs_lag_rem  0.0135242  0.0078953   1.713 0.086726 .  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation matrix not shown by default, as p = 16 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
# Residual diagnostics for all fitted candidate models.
plot_diagnostics(h2_pos_remote_models)

# Generalized VIFs for the selected model; very large GVIFs on the
# condition-by-lag interaction terms are expected because they share
# variance with their constituent main effects.
car::vif(h2_pos_remote_models[[2]])
                                                              GVIF Df GVIF^(1/(2*Df))
condition                                             4.254349e+05  2       25.539259
rem_mem_pos_mood_cs_lag                               7.121652e+01  1        8.438988
pos_mood_cs_lag_rem                                   7.223756e+01  1        8.499268
gender                                                1.070625e+00  1        1.034710
age                                                   1.081350e+00  1        1.039880
education                                             1.100255e+00  2        1.024173
condition:rem_mem_pos_mood_cs_lag                     3.583274e+05  2       24.466396
condition:pos_mood_cs_lag_rem                         4.901003e+05  2       26.458866
rem_mem_pos_mood_cs_lag:pos_mood_cs_lag_rem           1.344518e+02  1       11.595334
condition:rem_mem_pos_mood_cs_lag:pos_mood_cs_lag_rem 4.094042e+05  2       25.295197

Follow-up


# Follow-up models per group for remote positive memories.
# Plain lm() is used here (no random intercept) because the omnibus mixed
# model was singular: its random-intercept variance was estimated at zero.
m2_pos_rem_control <- lm(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem  + gender + age + education, 
                   data=df_medal_clean_remote_control)

m2_pos_rem_remitted <- lm(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem  + gender + age + education,  
                   data=df_medal_clean_remote_remitted)

m2_pos_rem_depressed <-  lm(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem  + gender + age + education, 
                   data=df_medal_clean_remote_depressed)

# Render the three group-specific models side by side in a single table
# (untransformed estimates, with SEs and AIC).
asis_output(tab_model(m2_pos_rem_control, m2_pos_rem_remitted, m2_pos_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 0.98 2.04 -3.04 – 5.01 0.630 8.06 2.00 4.12 – 12.00 <0.001 5.13 2.11 0.96 – 9.30 0.016
rem mem pos mood cs lag 1.19 0.32 0.56 – 1.82 <0.001 -0.15 0.31 -0.77 – 0.46 0.627 0.37 0.32 -0.26 – 1.00 0.245
pos mood cs lag rem 0.50 0.28 -0.05 – 1.04 0.074 -0.38 0.25 -0.88 – 0.11 0.127 -0.05 0.26 -0.56 – 0.46 0.838
gender [1] -0.10 0.17 -0.44 – 0.23 0.543 -0.11 0.18 -0.46 – 0.24 0.530 -0.16 0.20 -0.56 – 0.24 0.430
age 0.00 0.01 -0.01 – 0.01 0.948 0.00 0.01 -0.01 – 0.01 0.977 -0.00 0.01 -0.02 – 0.02 0.965
education [2] 0.03 0.17 -0.32 – 0.37 0.881 0.01 0.40 -0.78 – 0.80 0.977 0.11 0.49 -0.85 – 1.08 0.816
rem mem pos mood cs lag *
pos mood cs lag rem
-0.10 0.04 -0.18 – -0.01 0.025 0.07 0.04 -0.01 – 0.15 0.072 0.01 0.04 -0.07 – 0.09 0.798
education [1] -0.19 0.43 -1.04 – 0.65 0.652 0.18 0.51 -0.82 – 1.18 0.724
Observations 224 357 190
R2 / R2 adjusted 0.190 / 0.168 0.136 / 0.119 0.207 / 0.177
AIC 661.932 1211.438 634.332

Negative Affect

Recent

# H2, negative affect, recent memories: mood predicted by the three-way
# condition x recalled-memory-mood x lagged-mood interaction, with
# per-participant random slopes for both lagged predictors.
h2_neg_recent_eq = "neg_mood_cs ~ condition*rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + (1 + rec_mem_neg_mood_cs_lag + neg_mood_cs_lag_rec|subjectcode)"

# set clusters (one worker per available core)
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel: fit the equation under each candidate family/link
# combination; fits that error out are dropped (.errorhandling = "remove")
h2_neg_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_neg_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table (untransformed estimates, SEs, ICC, AIC)
asis_output( tab_model(h2_neg_recent_models, dv.labels = names(h2_neg_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 3.005 0.794 1.447 – 4.562 <0.001 1.279 0.141 1.002 – 1.556 <0.001 0.245 0.023 0.200 – 0.290 <0.001 2.212 0.735 0.770 – 3.654 0.003 0.968 0.129 0.714 – 1.221 <0.001
condition [remitted] 0.156 0.950 -1.707 – 2.019 0.870 -0.031 0.164 -0.353 – 0.291 0.850 0.018 0.027 -0.034 – 0.070 0.494 0.372 0.893 -1.379 – 2.122 0.677 0.244 0.154 -0.058 – 0.545 0.113
condition [depressed] -0.371 1.004 -2.340 – 1.598 0.712 -0.275 0.179 -0.625 – 0.076 0.124 0.078 0.030 0.020 – 0.136 0.009 0.402 0.922 -1.405 – 2.208 0.663 0.047 0.163 -0.272 – 0.366 0.773
rec mem neg mood cs lag 0.390 0.114 0.167 – 0.613 0.001 0.068 0.020 0.029 – 0.107 0.001 -0.010 0.003 -0.017 – -0.004 0.001 0.521 0.107 0.311 – 0.731 <0.001 0.114 0.018 0.078 – 0.150 <0.001
neg mood cs lag rec 0.261 0.116 0.033 – 0.488 0.025 0.051 0.020 0.010 – 0.091 0.014 -0.008 0.003 -0.015 – -0.002 0.015 0.370 0.108 0.158 – 0.582 0.001 0.095 0.019 0.058 – 0.131 <0.001
gender [1] -0.004 0.047 -0.097 – 0.088 0.927 -0.002 0.008 -0.017 – 0.013 0.802 0.001 0.001 -0.002 – 0.003 0.679 0.006 0.049 -0.091 – 0.102 0.905 -0.001 0.008 -0.017 – 0.015 0.908
age -0.003 0.002 -0.007 – 0.000 0.068 -0.001 0.000 -0.001 – -0.000 0.026 0.000 0.000 0.000 – 0.000 0.014 -0.004 0.002 -0.007 – 0.000 0.054 -0.001 0.000 -0.001 – -0.000 0.021
education [1] -0.027 0.131 -0.283 – 0.230 0.839 -0.005 0.021 -0.046 – 0.036 0.801 0.001 0.003 -0.005 – 0.008 0.731 -0.032 0.138 -0.303 – 0.239 0.816 -0.009 0.022 -0.052 – 0.035 0.700
education [2] -0.081 0.126 -0.328 – 0.165 0.519 -0.016 0.020 -0.055 – 0.024 0.437 0.003 0.003 -0.003 – 0.009 0.371 -0.092 0.133 -0.352 – 0.167 0.486 -0.019 0.021 -0.061 – 0.023 0.378
condition [remitted] *
rec mem neg mood cs lag
-0.094 0.141 -0.371 – 0.184 0.509 -0.005 0.024 -0.052 – 0.041 0.833 -0.001 0.004 -0.009 – 0.006 0.716 -0.156 0.137 -0.424 – 0.113 0.255 -0.047 0.023 -0.091 – -0.002 0.040
condition [depressed] *
rec mem neg mood cs lag
-0.005 0.150 -0.299 – 0.290 0.976 0.032 0.026 -0.018 – 0.082 0.213 -0.010 0.004 -0.018 – -0.002 0.016 -0.123 0.143 -0.403 – 0.156 0.388 -0.018 0.024 -0.065 – 0.029 0.452
condition [remitted] *
neg mood cs lag rec
-0.088 0.144 -0.371 – 0.196 0.544 -0.006 0.025 -0.055 – 0.042 0.792 -0.001 0.004 -0.009 – 0.007 0.821 -0.116 0.139 -0.388 – 0.156 0.403 -0.046 0.023 -0.091 – -0.000 0.049
condition [depressed] *
neg mood cs lag rec
-0.124 0.155 -0.428 – 0.180 0.425 0.011 0.027 -0.042 – 0.064 0.686 -0.007 0.004 -0.015 – 0.002 0.126 -0.235 0.146 -0.522 – 0.052 0.109 -0.034 0.025 -0.083 – 0.015 0.176
rec mem neg mood cs lag *
neg mood cs lag rec
-0.018 0.014 -0.046 – 0.010 0.202 -0.004 0.003 -0.009 – 0.001 0.103 0.001 0.000 -0.000 – 0.001 0.106 -0.036 0.013 -0.062 – -0.009 0.009 -0.010 0.002 -0.015 – -0.006 <0.001
(condition [remitted]
rec mem neg mood cs lag)
neg mood cs lag rec
0.026 0.018 -0.010 – 0.062 0.161 0.003 0.003 -0.003 – 0.009 0.378 -0.000 0.000 -0.001 – 0.001 0.822 0.034 0.018 -0.001 – 0.070 0.059 0.009 0.003 0.003 – 0.014 0.004
(condition [depressed]
rec mem neg mood cs lag)
neg mood cs lag rec
0.029 0.020 -0.010 – 0.067 0.147 -0.000 0.003 -0.007 – 0.006 0.956 0.001 0.001 -0.000 – 0.002 0.168 0.045 0.019 0.007 – 0.083 0.019 0.007 0.003 0.000 – 0.013 0.036
Random Effects
σ2 1.14 1.12 1.12 0.03 0.03
τ00 2.08 subjectcode 0.05 subjectcode 0.00 subjectcode 1.81 subjectcode 0.05 subjectcode
τ11 0.06 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.05 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag
0.04 subjectcode.neg_mood_cs_lag_rec 0.00 subjectcode.neg_mood_cs_lag_rec 0.00 subjectcode.neg_mood_cs_lag_rec 0.04 subjectcode.neg_mood_cs_lag_rec 0.00 subjectcode.neg_mood_cs_lag_rec
ρ01 -0.69 -0.67 -0.63 -0.64 -0.66
-0.26 -0.26 -0.30 -0.30 -0.29
ICC       0.82 0.10
N 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode
Observations 3081 3081 3081 3081 3081
Marginal R2 / Conditional R2 0.279 / NA 0.010 / NA 0.000 / NA 0.749 / 0.954 0.268 / 0.344
AIC 9455.059 9378.341 9388.238 9084.980 9124.723
NA

Check Final Model

# Detailed summary of the selected model: the Gamma identity-link fit
# (index 4), which had the lowest AIC in the comparison table above.
summary(h2_neg_recent_models[[4]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: Gamma  ( identity )
Formula: neg_mood_cs ~ condition * rec_mem_neg_mood_cs_lag * neg_mood_cs_lag_rec +      gender + age + education + (1 + rec_mem_neg_mood_cs_lag +  
    neg_mood_cs_lag_rec | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  9085.0   9223.7  -4519.5   9039.0     3058 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-5.1331 -0.5124 -0.0719  0.4245  6.2857 

Random effects:
 Groups      Name                    Variance Std.Dev. Corr       
 subjectcode (Intercept)             1.81372  1.3467              
             rec_mem_neg_mood_cs_lag 0.05078  0.2253   -0.64      
             neg_mood_cs_lag_rec     0.03576  0.1891   -0.30 -0.54
 Residual                            0.02885  0.1699              
Number of obs: 3081, groups:  subjectcode, 185

Fixed effects:
                                                                Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                                     2.211835   0.735333   3.008 0.002630 ** 
conditionremitted                                               0.371524   0.892540   0.416 0.677224    
conditiondepressed                                              0.401500   0.921579   0.436 0.663079    
rec_mem_neg_mood_cs_lag                                         0.520675   0.107064   4.863 1.15e-06 ***
neg_mood_cs_lag_rec                                             0.369952   0.108351   3.414 0.000639 ***
gender1                                                         0.005875   0.049220   0.119 0.904984    
age                                                            -0.003681   0.001912  -1.925 0.054182 .  
education1                                                     -0.032141   0.138052  -0.233 0.815903    
education2                                                     -0.092470   0.132576  -0.697 0.485496    
conditionremitted:rec_mem_neg_mood_cs_lag                      -0.155844   0.136878  -1.139 0.254886    
conditiondepressed:rec_mem_neg_mood_cs_lag                     -0.123154   0.142500  -0.864 0.387457    
conditionremitted:neg_mood_cs_lag_rec                          -0.116118   0.138843  -0.836 0.402971    
conditiondepressed:neg_mood_cs_lag_rec                         -0.234626   0.146421  -1.602 0.109065    
rec_mem_neg_mood_cs_lag:neg_mood_cs_lag_rec                    -0.035504   0.013497  -2.630 0.008527 ** 
conditionremitted:rec_mem_neg_mood_cs_lag:neg_mood_cs_lag_rec   0.034318   0.018169   1.889 0.058919 .  
conditiondepressed:rec_mem_neg_mood_cs_lag:neg_mood_cs_lag_rec  0.045092   0.019289   2.338 0.019399 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation matrix not shown by default, as p = 16 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
optimizer (bobyqa) convergence code: 1 (bobyqa -- maximum number of function evaluations exceeded)
# Residual diagnostics for all fitted candidate models.
plot_diagnostics(h2_neg_recent_models) 

# Generalized VIFs for the selected model; inflated GVIFs on the
# interaction terms are expected given their shared main effects.
car::vif(h2_neg_recent_models[[4]])
                                                              GVIF Df GVIF^(1/(2*Df))
condition                                             82057.673785  2       16.925044
rec_mem_neg_mood_cs_lag                                  20.733232  1        4.553376
neg_mood_cs_lag_rec                                      24.868710  1        4.986854
gender                                                    1.066437  1        1.032685
age                                                       1.059283  1        1.029215
education                                                 1.083684  2        1.020295
condition:rec_mem_neg_mood_cs_lag                     94723.944243  2       17.543448
condition:neg_mood_cs_lag_rec                         86598.284836  2       17.154471
rec_mem_neg_mood_cs_lag:neg_mood_cs_lag_rec              45.965007  1        6.779750
condition:rec_mem_neg_mood_cs_lag:neg_mood_cs_lag_rec 65773.641133  2       16.014485

Follow-up


# Follow-up models per group for recent negative memories, using the same
# family/link as the winning omnibus model (Gamma, identity link).
# NOTE(review): the random part is specified as two separate terms,
# (1 + rec_mem_neg_mood_cs_lag | subjectcode) + (1 + neg_mood_cs_lag_rec | subjectcode),
# which estimates the subject-level intercept variance twice (see tau00
# listed for both "subjectcode" and "subjectcode.1", with rho01 = -1.00,
# in the output below). The omnibus model instead used a single term
# (1 + rec_mem_neg_mood_cs_lag + neg_mood_cs_lag_rec | subjectcode) --
# confirm whether the duplicated intercept here is intentional.
m2_neg_rec_control <- glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + 
                              gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode), 
                   data=df_medal_clean_control,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rec_remitted <- glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode), 
                   data=df_medal_clean_remitted,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rec_depressed <-  glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode),  
                   data=df_medal_clean_depressed,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

# Render the three group-specific models side by side in a single table.
asis_output(tab_model(m2_neg_rec_control, m2_neg_rec_remitted, m2_neg_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 2.05 0.66 0.76 – 3.35 0.002 2.47 0.67 1.16 – 3.79 <0.001 2.61 0.72 1.19 – 4.03 <0.001
rec mem neg mood cs lag 0.52 0.09 0.35 – 0.69 <0.001 0.37 0.09 0.19 – 0.55 <0.001 0.42 0.10 0.23 – 0.62 <0.001
neg mood cs lag rec 0.37 0.09 0.19 – 0.54 <0.001 0.22 0.09 0.04 – 0.40 0.016 0.23 0.11 0.02 – 0.43 0.031
gender [1] 0.00 0.07 -0.13 – 0.14 0.959 0.05 0.08 -0.12 – 0.21 0.588 -0.09 0.10 -0.28 – 0.10 0.357
age -0.00 0.00 -0.01 – 0.00 0.545 -0.00 0.00 -0.01 – 0.00 0.278 -0.00 0.00 -0.01 – 0.01 0.561
education [2] -0.02 0.07 -0.15 – 0.12 0.796 0.09 0.18 -0.26 – 0.43 0.623 -0.48 0.24 -0.96 – -0.01 0.048
rec mem neg mood cs lag *
neg mood cs lag rec
-0.04 0.01 -0.06 – -0.01 0.001 0.00 0.01 -0.03 – 0.03 0.970 0.00 0.01 -0.03 – 0.03 0.960
education [1] 0.10 0.19 -0.27 – 0.48 0.585 -0.32 0.25 -0.82 – 0.17 0.200
Random Effects
σ2 0.02 0.03 0.03
τ00 1.70 subjectcode 2.10 subjectcode 1.58 subjectcode
1.31 subjectcode.1 1.03 subjectcode.1 1.19 subjectcode.1
τ11 0.04 subjectcode.rec_mem_neg_mood_cs_lag 0.05 subjectcode.rec_mem_neg_mood_cs_lag 0.04 subjectcode.rec_mem_neg_mood_cs_lag
0.03 subjectcode.1.neg_mood_cs_lag_rec 0.02 subjectcode.1.neg_mood_cs_lag_rec 0.03 subjectcode.1.neg_mood_cs_lag_rec
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
-1.00 subjectcode.1 -1.00 subjectcode.1 -1.00 subjectcode.1
ICC 0.76 0.75 0.73
N 53 subjectcode 86 subjectcode 46 subjectcode
Observations 868 1450 763
Marginal R2 / Conditional R2 0.708 / 0.931 0.793 / 0.948 0.851 / 0.960
AIC 2147.700 4475.592 2409.760

Remote

# H2, negative affect, remote memories: same three-way interaction model
# as the recent-memory analysis, but with the remote-memory predictors.
h2_neg_remote_eq = "neg_mood_cs ~ condition*rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem + gender + age + education + (1 + rem_mem_neg_mood_cs_lag + neg_mood_cs_lag_rem|subjectcode)"

# set clusters (one worker per available core)
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel: fit the equation under each candidate family/link
# combination; fits that error out are dropped (.errorhandling = "remove")
h2_neg_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_neg_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table (untransformed estimates, SEs, ICC, AIC)
asis_output( tab_model(h2_neg_remote_models, dv.labels = names(h2_neg_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 5.100 2.475 0.242 – 9.958 0.040 1.814 0.467 0.897 – 2.731 <0.001 0.155 0.076 0.006 – 0.303 0.041 5.092 2.363 0.452 – 9.731 0.032 2.523 0.429 1.681 – 3.365 <0.001
condition [remitted] -1.784 2.762 -7.205 – 3.638 0.519 -0.435 0.512 -1.441 – 0.570 0.396 0.074 0.083 -0.090 – 0.237 0.377 -2.678 2.664 -7.908 – 2.553 0.315 -1.137 0.480 -2.080 – -0.194 0.018
condition [depressed] -1.912 2.797 -7.402 – 3.578 0.494 -0.451 0.517 -1.467 – 0.564 0.383 0.078 0.084 -0.088 – 0.243 0.357 -1.011 2.691 -6.293 – 4.271 0.707 -1.132 0.484 -2.081 – -0.182 0.020
rem mem neg mood cs lag 0.018 0.478 -0.919 – 0.956 0.970 -0.027 0.087 -0.197 – 0.144 0.757 0.006 0.014 -0.021 – 0.033 0.679 0.016 0.455 -0.878 – 0.909 0.972 -0.157 0.081 -0.316 – 0.002 0.053
neg mood cs lag rem -0.142 0.412 -0.952 – 0.667 0.730 -0.049 0.079 -0.204 – 0.106 0.533 0.009 0.013 -0.016 – 0.034 0.496 -0.102 0.384 -0.855 – 0.652 0.791 -0.172 0.071 -0.311 – -0.033 0.016
gender [1] 0.071 0.099 -0.124 – 0.266 0.475 0.010 0.016 -0.021 – 0.041 0.541 -0.002 0.003 -0.007 – 0.003 0.542 0.033 0.110 -0.183 – 0.250 0.762 0.004 0.018 -0.032 – 0.039 0.839
age -0.000 0.004 -0.008 – 0.007 0.918 -0.000 0.001 -0.001 – 0.001 0.946 0.000 0.000 -0.000 – 0.000 0.979 -0.002 0.004 -0.010 – 0.007 0.724 -0.000 0.001 -0.002 – 0.001 0.833
education [1] 0.315 0.291 -0.257 – 0.887 0.280 0.047 0.047 -0.045 – 0.138 0.317 -0.007 0.008 -0.022 – 0.008 0.360 0.265 0.316 -0.356 – 0.885 0.403 0.045 0.052 -0.057 – 0.147 0.387
education [2] 0.127 0.281 -0.424 – 0.679 0.651 0.017 0.045 -0.071 – 0.106 0.704 -0.002 0.007 -0.017 – 0.012 0.738 0.068 0.304 -0.529 – 0.664 0.824 0.013 0.050 -0.085 – 0.112 0.792
condition [remitted] *
rem mem neg mood cs lag
0.647 0.532 -0.397 – 1.692 0.224 0.131 0.095 -0.056 – 0.318 0.170 -0.021 0.015 -0.051 – 0.008 0.157 0.781 0.514 -0.229 – 1.790 0.129 0.256 0.091 0.078 – 0.433 0.005
condition [depressed] *
rem mem neg mood cs lag
0.583 0.539 -0.476 – 1.641 0.280 0.117 0.096 -0.072 – 0.306 0.224 -0.020 0.015 -0.049 – 0.010 0.202 0.400 0.518 -0.617 – 1.417 0.440 0.232 0.091 0.054 – 0.411 0.011
condition [remitted] *
neg mood cs lag rem
0.239 0.458 -0.660 – 1.138 0.602 0.063 0.086 -0.106 – 0.232 0.466 -0.010 0.014 -0.038 – 0.017 0.459 0.349 0.432 -0.500 – 1.197 0.420 0.183 0.079 0.028 – 0.339 0.021
condition [depressed] *
neg mood cs lag rem
0.314 0.465 -0.599 – 1.227 0.500 0.074 0.087 -0.097 – 0.244 0.395 -0.013 0.014 -0.040 – 0.015 0.374 0.093 0.438 -0.766 – 0.952 0.831 0.182 0.080 0.025 – 0.339 0.023
rem mem neg mood cs lag *
neg mood cs lag rem
0.051 0.080 -0.106 – 0.208 0.522 0.013 0.015 -0.016 – 0.042 0.379 -0.002 0.002 -0.007 – 0.002 0.346 0.048 0.075 -0.098 – 0.195 0.518 0.036 0.013 0.009 – 0.062 0.008
(condition [remitted]
rem mem neg mood cs lag)
neg mood cs lag rem
-0.096 0.087 -0.267 – 0.075 0.272 -0.020 0.016 -0.051 – 0.011 0.211 0.003 0.003 -0.002 – 0.008 0.204 -0.111 0.083 -0.274 – 0.051 0.180 -0.041 0.015 -0.071 – -0.012 0.005
(condition [depressed]
rem mem neg mood cs lag)
neg mood cs lag rem
-0.094 0.089 -0.268 – 0.080 0.290 -0.019 0.016 -0.050 – 0.012 0.238 0.003 0.003 -0.002 – 0.008 0.220 -0.052 0.084 -0.216 – 0.113 0.537 -0.037 0.015 -0.067 – -0.008 0.012
Random Effects
σ2 1.19 1.12 1.13 0.03 0.03
τ00 2.73 subjectcode 0.11 subjectcode 0.00 subjectcode 5.52 subjectcode 0.17 subjectcode
τ11 0.10 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.11 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag
0.07 subjectcode.neg_mood_cs_lag_rem 0.00 subjectcode.neg_mood_cs_lag_rem 0.00 subjectcode.neg_mood_cs_lag_rem 0.09 subjectcode.neg_mood_cs_lag_rem 0.00 subjectcode.neg_mood_cs_lag_rem
ρ01 -0.48 -0.43 -0.40 -0.59 -0.56
-0.55 -0.68 -0.73 -0.67 -0.69
ICC     0.00 0.93 0.29
N 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode
Observations 773 773 773 773 773
Marginal R2 / Conditional R2 0.135 / NA 0.004 / NA 0.000 / 0.000 0.319 / 0.955 0.135 / 0.390
AIC 2517.737 2469.489 2474.506 2376.402 2379.731
NA

Check Final Model

# Detailed summary of the selected model: the Gamma identity-link fit
# (index 4), which had the lowest AIC in the comparison table above.
summary(h2_neg_remote_models[[4]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: Gamma  ( identity )
Formula: neg_mood_cs ~ condition * rem_mem_neg_mood_cs_lag * neg_mood_cs_lag_rem +      gender + age + education + (1 + rem_mem_neg_mood_cs_lag +  
    neg_mood_cs_lag_rem | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2376.4   2483.4  -1165.2   2330.4      750 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.8829 -0.4910 -0.0808  0.4597  3.6891 

Random effects:
 Groups      Name                    Variance Std.Dev. Corr       
 subjectcode (Intercept)             5.51536  2.3485              
             rem_mem_neg_mood_cs_lag 0.11300  0.3362   -0.59      
             neg_mood_cs_lag_rem     0.09193  0.3032   -0.67 -0.19
 Residual                            0.02806  0.1675              
Number of obs: 773, groups:  subjectcode, 184

Fixed effects:
                                                                Estimate Std. Error t value Pr(>|z|)  
(Intercept)                                                     5.091663   2.363471   2.154   0.0312 *
conditionremitted                                              -2.677690   2.664412  -1.005   0.3149  
conditiondepressed                                             -1.010776   2.690583  -0.376   0.7072  
rem_mem_neg_mood_cs_lag                                         0.015726   0.455245   0.035   0.9724  
neg_mood_cs_lag_rem                                            -0.101715   0.383861  -0.265   0.7910  
gender1                                                         0.033369   0.110264   0.303   0.7622  
age                                                            -0.001511   0.004283  -0.353   0.7243  
education1                                                      0.264588   0.315893   0.838   0.4023  
education2                                                      0.067513   0.303826   0.222   0.8242  
conditionremitted:rem_mem_neg_mood_cs_lag                       0.780523   0.514154   1.518   0.1290  
conditiondepressed:rem_mem_neg_mood_cs_lag                      0.399851   0.518095   0.772   0.4402  
conditionremitted:neg_mood_cs_lag_rem                           0.348888   0.432270   0.807   0.4196  
conditiondepressed:neg_mood_cs_lag_rem                          0.093241   0.437503   0.213   0.8312  
rem_mem_neg_mood_cs_lag:neg_mood_cs_lag_rem                     0.048341   0.074764   0.647   0.5179  
conditionremitted:rem_mem_neg_mood_cs_lag:neg_mood_cs_lag_rem  -0.111077   0.082755  -1.342   0.1795  
conditiondepressed:rem_mem_neg_mood_cs_lag:neg_mood_cs_lag_rem -0.051695   0.083662  -0.618   0.5366  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation matrix not shown by default, as p = 16 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
optimizer (bobyqa) convergence code: 1 (bobyqa -- maximum number of function evaluations exceeded)
# Residual diagnostics for all fitted candidate models.
plot_diagnostics(h2_neg_remote_models)

# Generalized VIFs for the selected model; inflated GVIFs on the
# interaction terms are expected given their shared main effects.
car::vif(h2_neg_remote_models[[4]])
                                                              GVIF Df GVIF^(1/(2*Df))
condition                                             1.720344e+05  2       20.365909
rem_mem_neg_mood_cs_lag                               7.755339e+01  1        8.806440
neg_mood_cs_lag_rem                                   7.176310e+01  1        8.471310
gender                                                1.066969e+00  1        1.032942
age                                                   1.090035e+00  1        1.044047
education                                             1.089387e+00  2        1.021635
condition:rem_mem_neg_mood_cs_lag                     1.852707e+05  2       20.746828
condition:neg_mood_cs_lag_rem                         1.833325e+05  2       20.692352
rem_mem_neg_mood_cs_lag:neg_mood_cs_lag_rem           1.827300e+02  1       13.517764
condition:rem_mem_neg_mood_cs_lag:neg_mood_cs_lag_rem 1.977950e+05  2       21.088895

Follow-up


# Follow-up models per group for remote negative memories, using the same
# family/link as the winning omnibus model (Gamma, identity link).
# NOTE(review): as in the recent-memory follow-up, the random part uses two
# separate (1 + slope | subjectcode) terms, so the subject-level intercept
# variance is estimated twice (tau00 appears for both "subjectcode" and
# "subjectcode.1", with rho01 close to -1.00, in the output below) --
# confirm this is intended rather than the single combined term used in
# the omnibus model.
m2_neg_rem_control <- glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode), 
                   data=df_medal_clean_remote_control,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rem_remitted <- glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode),
                   data=df_medal_clean_remote_remitted,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rem_depressed <-  glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode), 
                   data=df_medal_clean_remote_depressed,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

# Render the three group-specific models side by side in a single table.
asis_output(tab_model(m2_neg_rem_control, m2_neg_rem_remitted, m2_neg_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 6.08 2.12 1.90 – 10.26 0.005 2.97 1.60 -0.17 – 6.11 0.063 2.69 1.76 -0.79 – 6.17 0.130
rem mem neg mood cs lag -0.26 0.39 -1.03 – 0.52 0.516 0.73 0.26 0.22 – 1.24 0.005 0.57 0.27 0.03 – 1.11 0.039
neg mood cs lag rem -0.30 0.34 -0.97 – 0.37 0.373 0.15 0.21 -0.26 – 0.56 0.469 0.15 0.23 -0.31 – 0.61 0.508
gender [1] -0.01 0.13 -0.27 – 0.25 0.937 0.17 0.19 -0.21 – 0.55 0.389 0.01 0.21 -0.40 – 0.42 0.962
age 0.00 0.01 -0.01 – 0.01 0.820 -0.00 0.01 -0.02 – 0.01 0.739 0.01 0.01 -0.01 – 0.02 0.521
education [2] -0.12 0.13 -0.38 – 0.14 0.371 -0.09 0.44 -0.95 – 0.77 0.841 0.43 0.48 -0.51 – 1.38 0.366
rem mem neg mood cs lag *
neg mood cs lag rem
0.10 0.06 -0.02 – 0.23 0.105 -0.05 0.04 -0.12 – 0.02 0.189 -0.03 0.04 -0.12 – 0.05 0.424
education [1] 0.53 0.47 -0.40 – 1.46 0.264 0.32 0.50 -0.66 – 1.29 0.526
Random Effects
σ2 0.01 0.04 0.03
τ00 7.82 subjectcode 2.25 subjectcode 2.89 subjectcode
7.49 subjectcode.1 2.24 subjectcode.1 3.96 subjectcode.1
τ11 0.29 subjectcode.rem_mem_neg_mood_cs_lag 0.09 subjectcode.rem_mem_neg_mood_cs_lag 0.10 subjectcode.rem_mem_neg_mood_cs_lag
0.20 subjectcode.1.neg_mood_cs_lag_rem 0.05 subjectcode.1.neg_mood_cs_lag_rem 0.09 subjectcode.1.neg_mood_cs_lag_rem
ρ01 -1.00 subjectcode -0.99 subjectcode -0.99 subjectcode
-1.00 subjectcode.1 -0.98 subjectcode.1 -1.00 subjectcode.1
ICC 0.98 0.83 0.85
N 52 subjectcode 84 subjectcode 45 subjectcode
Observations 224 356 190
Marginal R2 / Conditional R2 0.340 / 0.985 0.565 / 0.926 0.474 / 0.923
AIC 449.613 1180.764 649.357

6 Exploratory Analyses

6.1 Mediation Models

Following the tests we ran for hypothesis 1, we also examined whether the group differences in mood are mediated by recalled memory mood.

RecPA


# Create the mediation dataframe: drop the participants with singular
# datapoints and any rows missing the outcome or the recent-memory mediator.
# NOTE(review): an earlier comment here said "remote variables", but this
# section (RecPA) filters on the recent-memory variable
# rec_mem_pos_mood_lag -- presumably "recent" was meant.
df_medal_moderation =  df_medal_clean %>% 
  filter (!(subjectcode == 822 | subjectcode == 906 | subjectcode == 717)) %>% 
  filter(!(is.na(pos_mood) | is.na(rec_mem_pos_mood_lag))) 

# med model: does group (condition_dummy) predict recalled memory mood?
med_fit_h1_pos_rec <- lme4::lmer(rec_mem_pos_mood_lag ~ condition_dummy +
                                   age + gender + education + 
                                   (1 |subjectcode),
                                 data = df_medal_moderation)
# full model: current mood predicted by group plus the mediator,
# with a random slope for the mediator per participant
out_fit_h1_pos_rec <-lme4::lmer(pos_mood ~ condition_dummy + rec_mem_pos_mood_lag +
                                  age + gender +education +
                                  (1 + rec_mem_pos_mood_lag |subjectcode), 
                                data = df_medal_moderation)
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv,  :
  Model failed to converge with max|grad| = 0.00497336 (tol = 0.002, component 1)
#mediation analysis contrasting control (coded 1) vs depressed (coded 3)
med_pos_rec_dep <- mediation::mediate(med_fit_h1_pos_rec, out_fit_h1_pos_rec,
                                      treat = 'condition_dummy' , control.value = 1, treat.value = 3 ,  mediator = 'rec_mem_pos_mood_lag', 
                                      sims=10000)
summary(med_pos_rec_dep)

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)             -1.531       -1.854        -1.22  <2e-16 ***
ACME (treated)             -1.531       -1.854        -1.22  <2e-16 ***
ADE (control)              -1.415       -1.684        -1.15  <2e-16 ***
ADE (treated)              -1.415       -1.684        -1.15  <2e-16 ***
Total Effect               -2.946       -3.359        -2.54  <2e-16 ***
Prop. Mediated (control)    0.519        0.445         0.59  <2e-16 ***
Prop. Mediated (treated)    0.519        0.445         0.59  <2e-16 ***
ACME (average)             -1.531       -1.854        -1.22  <2e-16 ***
ADE (average)              -1.415       -1.684        -1.15  <2e-16 ***
Prop. Mediated (average)    0.519        0.445         0.59  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 3046 


Simulations: 10000 

RemNA

# Mediation dataframe for this section: remote subset, dropping rows missing
# the outcome or the memory mediator.
# NOTE(review): the section heading is "RemNA" and the data come from the
# remote subset (df_medal_clean_remote), yet the filter and both models use
# the recent-memory variable rec_mem_neg_mood_lag and objects named *_rec --
# verify whether rem_mem_neg_mood_lag was intended here.
df_medal_moderation = df_medal_clean_remote %>% filter(!(is.na(neg_mood) | is.na(rec_mem_neg_mood_lag))) 

# med model: does group (condition_dummy) predict recalled memory mood?
med_fit_h1_neg_rec <- lme4::lmer(rec_mem_neg_mood_lag ~ condition_dummy +
                                   age + gender + education +  
                                   (1 |subjectcode), 
                                 data = df_medal_moderation)
# full model: current mood predicted by group plus the mediator,
# with a random slope for the mediator per participant
out_fit_h1_neg_rec <- lme4::lmer(neg_mood ~ condition_dummy + rec_mem_neg_mood_lag +
                                  age + gender +education + 
                                  (1 + rec_mem_neg_mood_lag|subjectcode), 
                                data = df_medal_moderation)


#mediation analysis contrasting control (coded 1) vs remitted (coded 2)
summary(mediation::mediate(med_fit_h1_neg_rec, out_fit_h1_neg_rec, 
                           treat = 'condition_dummy' , mediator = 'rec_mem_neg_mood_lag', control.value = 1, treat.value = 2 ,   
                           sims=10000))

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)              0.722        0.456         0.99  <2e-16 ***
ACME (treated)              0.722        0.456         0.99  <2e-16 ***
ADE (control)               0.795        0.488         1.11  <2e-16 ***
ADE (treated)               0.795        0.488         1.11  <2e-16 ***
Total Effect                1.516        1.117         1.91  <2e-16 ***
Prop. Mediated (control)    0.476        0.339         0.62  <2e-16 ***
Prop. Mediated (treated)    0.476        0.339         0.62  <2e-16 ***
ACME (average)              0.722        0.456         0.99  <2e-16 ***
ADE (average)               0.795        0.488         1.11  <2e-16 ***
Prop. Mediated (average)    0.476        0.339         0.62  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 3044 


Simulations: 10000 
#mediation analysis for control and depressed
# Same mediator/outcome models as above; contrast control (1) vs. depressed (3).
summary(mediation::mediate(med_fit_h1_neg_rec, out_fit_h1_neg_rec, 
                           treat = 'condition_dummy' ,  mediator = 'rec_mem_neg_mood_lag', control.value = 1, treat.value = 3 ,
                           sims=10000))

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)              1.817        1.467         2.19  <2e-16 ***
ACME (treated)              1.817        1.467         2.19  <2e-16 ***
ADE (control)               1.763        1.383         2.14  <2e-16 ***
ADE (treated)               1.763        1.383         2.14  <2e-16 ***
Total Effect                3.580        3.077         4.08  <2e-16 ***
Prop. Mediated (control)    0.507        0.432         0.59  <2e-16 ***
Prop. Mediated (treated)    0.507        0.432         0.59  <2e-16 ***
ACME (average)              1.817        1.467         2.19  <2e-16 ***
ADE (average)               1.763        1.383         2.14  <2e-16 ***
Prop. Mediated (average)    0.507        0.432         0.59  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 3044 


Simulations: 10000 

6.2 Moderated Mediation

RemPA-CurrPA


# Restrict to rows with both outcome (current PA) and mediator (recalled PA,
# remote) observed so both models share the same estimation sample.
df_medal_moderation = df_medal_clean_remote %>% 
  filter(!(is.na(pos_mood) | is.na(rem_mem_pos_mood_lag))) 

#mediation first
# Mediator model: remote recall of positive mood ~ group + demographics,
# random intercept per subject.
med_fit_h2_pos_rem <- lme4::lmer(rem_mem_pos_mood_lag ~ condition_dummy + 
                                   age + gender + education + 
                                   (1 |subjectcode), 
                                 data =df_medal_moderation)
# full model
# Outcome model: current positive mood ~ group + mediator, with a per-subject
# random slope for the mediator (convergence warning appears in the output below).
out_fit_h2_pos_rem <-lme4::lmer(pos_mood ~ condition_dummy + rem_mem_pos_mood_lag 
                                + age + gender +education +
                                  (1 + rem_mem_pos_mood_lag |subjectcode),
                                data=df_medal_moderation)
Warning in checkConv(attr(opt, "derivs"), opt$par, ctrl = control$checkConv,  :
  Model failed to converge with max|grad| = 0.00602257 (tol = 0.002, component 1)
#mediation analysis for control and remitted
# FIX(review): every other mediation analysis in this notebook requests
# sims = 10000; this call omitted the argument, so mediate() fell back to its
# default of 1000 simulations (see "Simulations: 1000" in the rendered output).
# Set it explicitly for consistency and more stable confidence intervals.
med_pos_rem_rem <- mediation::mediate(med_fit_h2_pos_rem, out_fit_h2_pos_rem, 
                                      treat = 'condition_dummy' , control.value = 1, treat.value = 2 ,  mediator = 'rem_mem_pos_mood_lag',
                                      sims = 10000)
summary(med_pos_rem_rem)

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)             -0.662       -0.975        -0.35  <2e-16 ***
ACME (treated)             -0.662       -0.975        -0.35  <2e-16 ***
ADE (control)              -0.370       -0.702        -0.04   0.028 *  
ADE (treated)              -0.370       -0.702        -0.04   0.028 *  
Total Effect               -1.031       -1.464        -0.57  <2e-16 ***
Prop. Mediated (control)    0.641        0.417         0.95  <2e-16 ***
Prop. Mediated (treated)    0.641        0.417         0.95  <2e-16 ***
ACME (average)             -0.662       -0.975        -0.35  <2e-16 ***
ADE (average)              -0.370       -0.702        -0.04   0.028 *  
Prop. Mediated (average)    0.641        0.417         0.95  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 771 


Simulations: 1000 
#create new dataframes with only control remitted and only control depressed
# Moderated mediation (JSmediation) needs two-group contrasts, so we build one
# data frame per pairwise comparison.
df_medal_con_rem = df_medal_clean %>% filter(condition=='control' | condition == 'remitted')
df_medal_con_dep = df_medal_clean %>% filter(condition=='control' | condition == 'depressed')

#remitted group
# build_contrast() (JSmediation) codes control vs. remitted as a -0.5/+0.5 contrast;
# mdt_moderated() tests whether lagged PA moderates the mediation paths.
df_medal_con_rem$condition_dummy = build_contrast(df_medal_con_rem$condition, 'control', 'remitted')
med_h2_pos_rem_rem <- mdt_moderated(data = df_medal_con_rem, IV =condition_dummy, DV = pos_mood, M = rem_mem_pos_mood_lag, Mod = pos_mood_lag_rem) 
# Bind the individual path models into one table; idcol labels each path.
# NOTE(review): unlike the NA section below, no depressed-group moderated
# mediation is run here -- confirm whether that is intentional.
rbindlist(med_h2_pos_rem_rem$paths, idcol = T)
NA

RemNA - CurrNA

# Same-sample restriction for the remote negative-mood mediation models.
df_medal_moderation = df_medal_clean_remote %>% filter(!(is.na(neg_mood) | is.na(rem_mem_neg_mood_lag))) 

#mediation first
# Mediator model: remote recall of negative mood ~ group + demographics.
# bobyqa with a raised iteration cap (and derivatives skipped) is used to
# avoid the convergence warnings seen for the PA variant of this model.
med_fit_h2_neg_rem <- lme4::lmer(rem_mem_neg_mood_lag ~ condition_dummy +
                                   age + gender + education +
                                   (1 |subjectcode),
                                 control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)),
                                 data =df_medal_moderation)

# Outcome model: current negative mood ~ group + mediator, random slope for
# the mediator per subject; same optimizer settings.
out_fit_h2_neg_rem <-lme4::lmer(neg_mood ~ condition_dummy + rem_mem_neg_mood_lag +
                                  age + gender + education +
                                  (1 + rem_mem_neg_mood_lag |subjectcode), 
                                control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)),
                                data=df_medal_moderation)

#mediation analysis for control and depressed
# Contrast: control (1) vs. depressed (3); 10,000 quasi-Bayesian simulations.
med_neg_rem_dep <- mediation::mediate(med_fit_h2_neg_rem, out_fit_h2_neg_rem,  
                                      mediator = 'rem_mem_neg_mood_lag',  treat = 'condition_dummy' , 
                                      control.value = 1, treat.value = 3 , sims=10000) 
summary(med_neg_rem_dep)

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)              2.346        1.866         2.86  <2e-16 ***
ACME (treated)              2.346        1.866         2.86  <2e-16 ***
ADE (control)               1.227        0.784         1.67  <2e-16 ***
ADE (treated)               1.227        0.784         1.67  <2e-16 ***
Total Effect                3.573        2.996         4.16  <2e-16 ***
Prop. Mediated (control)    0.657        0.555         0.76  <2e-16 ***
Prop. Mediated (treated)    0.657        0.555         0.76  <2e-16 ***
ACME (average)              2.346        1.866         2.86  <2e-16 ***
ADE (average)               1.227        0.784         1.67  <2e-16 ***
Prop. Mediated (average)    0.657        0.555         0.76  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 770 


Simulations: 10000 
#mediation analysis for control and remitted
# Same models; contrast control (1) vs. remitted (2), 10,000 simulations.
med_neg_rem_rem <- mediation::mediate(med_fit_h2_neg_rem, out_fit_h2_neg_rem, treat = 'condition_dummy' , control.value = 1, treat.value = 2 ,  mediator = 'rem_mem_neg_mood_lag', sims=10000) 
summary(med_neg_rem_rem)

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)              0.894        0.546         1.26  <2e-16 ***
ACME (treated)              0.894        0.546         1.26  <2e-16 ***
ADE (control)               0.422        0.142         0.70  0.0036 ** 
ADE (treated)               0.422        0.142         0.70  0.0036 ** 
Total Effect                1.315        0.874         1.76  <2e-16 ***
Prop. Mediated (control)    0.679        0.503         0.87  <2e-16 ***
Prop. Mediated (treated)    0.679        0.503         0.87  <2e-16 ***
ACME (average)              0.894        0.546         1.26  <2e-16 ***
ADE (average)               0.422        0.142         0.70  0.0036 ** 
Prop. Mediated (average)    0.679        0.503         0.87  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 770 


Simulations: 10000 
#create new dataframes with only control remitted and only control depressed
# Pairwise two-group data frames for the JSmediation moderated-mediation models.
df_medal_con_rem = df_medal_clean %>% filter(condition=='control' | condition == 'remitted')
df_medal_con_dep = df_medal_clean %>% filter(condition=='control' | condition == 'depressed')

#remitted group
# Contrast-code group, then test whether lagged NA moderates the mediation of
# group -> remote NA recall -> current NA.
df_medal_con_rem$condition_dummy = build_contrast(df_medal_con_rem$condition, 'control', 'remitted')
med_h2_neg_rem_rem <- mdt_moderated(data = df_medal_con_rem, IV = condition_dummy, DV = neg_mood, M = rem_mem_neg_mood_lag, Mod = neg_mood_lag_rem) 
rbindlist(med_h2_neg_rem_rem$paths, idcol = T)

#depressed group
# Same model for the control vs. depressed contrast.
df_medal_con_dep$condition_dummy = build_contrast(df_medal_con_dep$condition, 'control', 'depressed')
med_h2_neg_rem_dep <- mdt_moderated(data = df_medal_con_dep, IV = condition_dummy, DV = neg_mood, M = rem_mem_neg_mood_lag, Mod = neg_mood_lag_rem) 
rbindlist(med_h2_neg_rem_dep$paths,idcol = T)
NA

6.3 Context

# Context models: current (subject-centered) mood ~ recalled mood x location,
# one model per valence x recall-type combination, following the order used
# throughout the notebook: PA-recent, PA-remote, NA-recent, NA-remote.
# BUG FIX(review): the third model previously duplicated the fourth
# (rem_mem_neg_mood_cs_lag twice -- the rendered table showed two identical
# NA columns and no recent-NA model). It should use the *recent* NA recall lag.
asis_output(tab_model( (lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)), 
           (lmer(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)),
            (lmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)),
             (lmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)))$knitr)
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
  pos mood cs pos mood cs neg mood cs neg mood cs
Predictors Estimates CI p Estimates CI p Estimates CI p Estimates CI p
(Intercept) 3.81 3.46 – 4.16 <0.001 5.35 4.64 – 6.05 <0.001 4.77 4.11 – 5.44 <0.001 4.77 4.11 – 5.44 <0.001
rec mem pos mood cs lag 0.49 0.44 – 0.54 <0.001
context location 0.01 -0.13 – 0.14 0.936 -0.25 -0.53 – 0.02 0.068 -0.19 -0.44 – 0.06 0.136 -0.19 -0.44 – 0.06 0.136
rec mem pos mood cs lag *
context location
0.01 -0.01 – 0.03 0.521
rem mem pos mood cs lag 0.33 0.21 – 0.45 <0.001
rem mem pos mood cs lag *
context location
0.05 0.01 – 0.10 0.023
rem mem neg mood cs lag 0.26 0.14 – 0.39 <0.001 0.26 0.14 – 0.39 <0.001
rem mem neg mood cs lag *
context location
0.04 -0.01 – 0.08 0.129 0.04 -0.01 – 0.08 0.129
Random Effects
σ2 1.28 1.48 1.45 1.45
τ00 0.00 subjectcode 0.00 subjectcode 0.00 subjectcode 0.00 subjectcode
N 189 subjectcode 188 subjectcode 188 subjectcode 188 subjectcode
Observations 3140 789 789 789
Marginal R2 / Conditional R2 0.257 / NA 0.163 / NA 0.100 / NA 0.100 / NA

6.4 SRET

In addition to the EMA, participants also completed the SRET, a lab-based memory bias measure. In this supplementary analysis, we check whether our real-life memory bias measures are linked to those obtained in the lab.


library('haven')
Warning: package ‘haven’ was built under R version 4.1.3
# Load SRET (self-referent encoding task) scores from the SPSS work file.
sret_file = file.path("Z:/", "inbox", "transfer-2023-12-07-02-15-pm", 'MEDAL_pre and post quest and remote recall_workfile!!_for paper only var.sav')
sret_data = read_sav(sret_file) 
# Keep columns 1 and 4 -- presumably the subject id and SRET_BiasGotlib_neg
# (both are used in the merge below); TODO confirm against the SPSS file.
# Positional selection is fragile if the file layout changes.
sret_data = sret_data %>% select(c(1,4)) %>% distinct()

# Per-subject correlation between recent recall of PA and current PA.
# tryCatch maps cor()'s zero-variance warning to NA for subjects whose
# ratings do not vary; mutate + distinct collapses to one row per subject
# (equivalent to a summarise).
df_medal_rec_cor1 = df_medal_clean %>%
  dplyr::select(subjectcode, pos_mood, rec_mem_pos_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr =  tryCatch(cor(rec_mem_pos_mood_lag, pos_mood), warning = function(e) NA),
                model_type = "PA_REC") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct()  %>% ungroup()


# Per-subject correlation between recent recall of NA and current NA
# (same pattern as the PA_REC pipeline above).
df_medal_rec_cor2 = df_medal_clean %>%
  dplyr::select(subjectcode, neg_mood, rec_mem_neg_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr =  tryCatch(cor(rec_mem_neg_mood_lag, neg_mood), warning = function(e) NA),
                model_type = "NA_REC") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct() %>% ungroup()


# Per-subject correlation between remote recall of PA and current PA.
# tryCatch maps cor()'s zero-variance warning to NA for subjects whose
# ratings do not vary.
# FIX(review): added the trailing ungroup() for consistency with the
# PA_REC/NA_REC pipelines -- previously this returned a still-grouped tibble.
df_medal_rem_cor1 = df_medal_clean %>%
  dplyr::select(subjectcode, pos_mood, rem_mem_pos_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr = tryCatch(cor(rem_mem_pos_mood_lag, pos_mood), warning = function(e) NA),
                model_type = "PA_REM") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct() %>% ungroup()

# Per-subject correlation between remote recall of NA and current NA.
# FIX(review): added the trailing ungroup() for consistency with the
# PA_REC/NA_REC pipelines -- previously this returned a still-grouped tibble.
df_medal_rem_cor2 = df_medal_clean %>%
  dplyr::select(subjectcode, neg_mood, rem_mem_neg_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr = tryCatch(cor(rem_mem_neg_mood_lag, neg_mood), warning = function(e) NA),
                model_type = "NA_REM") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct() %>% ungroup()


# Stack the four per-subject correlation tables and join the SRET scores.
df_cors = rbindlist(list(df_medal_rec_cor1, df_medal_rec_cor2, df_medal_rem_cor1, df_medal_rem_cor2))

df_ref = merge(sret_data, df_cors, by.x = 'subject', by.y = 'subjectcode')
# Group membership is encoded in the subject-code ranges: <800 remitted,
# >=900 control, otherwise depressed.
# NOTE(review): section 6.5 uses `sub <= 800` for the same split -- confirm
# whether a subject code of exactly 800 exists and which boundary is correct.
df_ref = df_ref %>% rename(SRET = SRET_BiasGotlib_neg) %>% dplyr::mutate(group = ifelse(subject<800, "remitted", 
                                                                    ifelse(subject>=900, "control", 'depressed')) )


# One regression per measure: does the real-life mood-recall correlation
# relate to the lab SRET bias, and does that differ by group?
for (mod_type in c("PA_REC", "PA_REM", "NA_REC", "NA_REM")){
  print(mod_type)
  print(summary(lm(SRET ~ corr*group, data = df_ref %>% filter(model_type == mod_type))))
}
[1] "PA_REC"

Call:
lm(formula = SRET ~ corr * group, data = df_ref %>% filter(model_type == 
    mod_type))

Residuals:
     Min       1Q   Median       3Q      Max 
-0.42802 -0.18783 -0.03731  0.07877  0.96612 

Coefficients:
                    Estimate Std. Error t value Pr(>|t|)  
(Intercept)         0.032066   0.055434   0.578   0.5637  
corr                0.009979   0.109528   0.091   0.9275  
groupdepressed      0.207749   0.113678   1.828   0.0694 .
groupremitted       0.109699   0.078769   1.393   0.1655  
corr:groupdepressed 0.299086   0.203331   1.471   0.1431  
corr:groupremitted  0.145614   0.151633   0.960   0.3383  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.2605 on 171 degrees of freedom
  (12 observations deleted due to missingness)
Multiple R-squared:  0.2304,    Adjusted R-squared:  0.2079 
F-statistic: 10.24 on 5 and 171 DF,  p-value: 1.343e-08

[1] "PA_REM"

Call:
lm(formula = SRET ~ corr * group, data = df_ref %>% filter(model_type == 
    mod_type))

Residuals:
     Min       1Q   Median       3Q      Max 
-0.41550 -0.21434 -0.03978  0.12018  0.96298 

Coefficients:
                     Estimate Std. Error t value Pr(>|t|)    
(Intercept)          0.034360   0.044794   0.767 0.444147    
corr                 0.008142   0.065661   0.124 0.901462    
groupdepressed       0.365632   0.065231   5.605 8.57e-08 ***
groupremitted        0.192437   0.056284   3.419 0.000792 ***
corr:groupdepressed  0.007906   0.096231   0.082 0.934621    
corr:groupremitted  -0.025126   0.082845  -0.303 0.762047    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.2675 on 165 degrees of freedom
  (17 observations deleted due to missingness)
Multiple R-squared:  0.2043,    Adjusted R-squared:  0.1802 
F-statistic: 8.475 on 5 and 165 DF,  p-value: 3.685e-07

[1] "NA_REC"

Call:
lm(formula = SRET ~ corr * group, data = df_ref %>% filter(model_type == 
    mod_type))

Residuals:
     Min       1Q   Median       3Q      Max 
-0.45379 -0.17704 -0.03939  0.08131  0.96345 

Coefficients:
                    Estimate Std. Error t value Pr(>|t|)    
(Intercept)          0.03215    0.04477   0.718 0.473691    
corr                 0.01727    0.10065   0.172 0.863976    
groupdepressed       0.29548    0.08311   3.555 0.000489 ***
groupremitted        0.10471    0.06401   1.636 0.103709    
corr:groupdepressed  0.14637    0.15930   0.919 0.359496    
corr:groupremitted   0.18205    0.13735   1.325 0.186793    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.2605 on 170 degrees of freedom
  (13 observations deleted due to missingness)
Multiple R-squared:  0.2323,    Adjusted R-squared:  0.2097 
F-statistic: 10.29 on 5 and 170 DF,  p-value: 1.249e-08

[1] "NA_REM"

Call:
lm(formula = SRET ~ corr * group, data = df_ref %>% filter(model_type == 
    mod_type))

Residuals:
     Min       1Q   Median       3Q      Max 
-0.43367 -0.17164 -0.04881  0.12752  0.94467 

Coefficients:
                    Estimate Std. Error t value Pr(>|t|)    
(Intercept)          0.04423    0.04128   1.071 0.285714    
corr                -0.02001    0.06467  -0.309 0.757395    
groupdepressed       0.34096    0.06427   5.305 3.86e-07 ***
groupremitted        0.19828    0.05408   3.667 0.000338 ***
corr:groupdepressed  0.08919    0.10774   0.828 0.409041    
corr:groupremitted  -0.07917    0.08544  -0.927 0.355620    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.2702 on 154 degrees of freedom
  (28 observations deleted due to missingness)
Multiple R-squared:  0.2103,    Adjusted R-squared:  0.1846 
F-statistic: 8.201 on 5 and 154 DF,  p-value: 6.88e-07

6.5 Relative Bias

Here we want to check for the relative NA/PA bias. So we extract the random effects from the models to get the slopes for each subject, and then compare the NA/PA slopes. This tells us whether there is a bias between negative and positive memory.


# Get individual slopes
# Extract per-subject random effects (conditional modes) from the H1 models,
# dropping the random intercepts so only the memory-slope terms remain.
# select(c(3:5)) keeps the grp / condval / condsd columns of the
# as.data.frame(ranef(...)) output -- TODO confirm column positions.
pos_rec_ref = as.data.frame(ranef(h1_pos_recent_models[[2]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
pos_rem_ref = as.data.frame(ranef(h1_pos_remote_models[[2]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
neg_rec_ref = as.data.frame(ranef(h1_neg_recent_models[[4]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
neg_rem_ref = as.data.frame(ranef(h1_neg_remote_models[[4]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
# Tag each table with its valence x recall-type label.
pos_rec_ref$mod = "PA_REC"
pos_rem_ref$mod = "PA_REM"
neg_rec_ref$mod = "NA_REC"
neg_rem_ref$mod = "NA_REM"

# Put together into a new data frame
df_ref = rbindlist(list(pos_rec_ref, pos_rem_ref, neg_rec_ref, neg_rem_ref))
# Recover numeric subject codes from the grouping factor and assign groups
# from the code ranges.
# NOTE(review): `sub <= 800` here vs. `subject < 800` in the SRET section --
# confirm which boundary is intended for subject code 800.
df_ref = df_ref %>% 
  dplyr::mutate(sub = as.numeric(as.character(grp))) %>%
  dplyr::mutate(group = ifelse(sub <= 800, "remitted", 
                               ifelse(sub>=900, "control", 'depressed')) )

# Split into recent- and remote-recall subsets and compare PA vs. NA.
# NOTE(review): the DV below is `condsd` (the conditional SD of the random
# effect), while the surrounding text describes comparing the *slopes*
# (`condval`) -- confirm this is intentional.
df_ref_rec = df_ref %>% filter(mod == 'PA_REC' | mod=='NA_REC')
df_ref_rem = df_ref %>% filter(mod == 'PA_REM' | mod=='NA_REM')
summary(lmer(condsd ~ mod*group + (1|sub), data = df_ref_rec))
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: condsd ~ mod * group + (1 | sub)
   Data: df_ref_rec

REML criterion at convergence: -1627.3

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-5.2098 -0.1327  0.0031  0.2809  2.0044 

Random effects:
 Groups   Name        Variance  Std.Dev.
 sub      (Intercept) 7.518e-05 0.008671
 Residual             5.555e-04 0.023568
Number of obs: 370, groups:  sub, 185

Fixed effects:
                           Estimate Std. Error         df t value Pr(>|t|)    
(Intercept)                0.174568   0.003449 358.899624  50.607  < 2e-16 ***
modPA_REC                 -0.151833   0.004578 182.000000 -33.164  < 2e-16 ***
groupdepressed            -0.031248   0.005060 358.899624  -6.175 1.79e-09 ***
groupremitted             -0.019827   0.004385 358.899624  -4.521 8.37e-06 ***
modPA_REC:groupdepressed   0.028707   0.006716 182.000000   4.274 3.09e-05 ***
modPA_REC:groupremitted    0.018436   0.005820 182.000000   3.167   0.0018 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
              (Intr) mdPA_REC grpdpr grprmt mdPA_REC:grpd
modPA_REC     -0.664                                     
groupdprssd   -0.682  0.452                              
groupremttd   -0.787  0.522    0.536                     
mdPA_REC:grpd  0.452 -0.682   -0.664 -0.356              
mdPA_REC:grpr  0.522 -0.787   -0.356 -0.664  0.536       
summary(lmer(condsd ~ mod*group + (1|sub), data = df_ref_rem))
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: condsd ~ mod * group + (1 | sub)
   Data: df_ref_rem

REML criterion at convergence: -3051.1

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.6567 -0.3176  0.0405  0.5475  1.7631 

Random effects:
 Groups   Name        Variance  Std.Dev.
 sub      (Intercept) 2.602e-06 0.001613
 Residual             9.639e-06 0.003105
Number of obs: 368, groups:  sub, 184

Fixed effects:
                           Estimate Std. Error         df t value Pr(>|t|)    
(Intercept)               4.995e-02  4.806e-04  3.464e+02 103.932  < 2e-16 ***
modPA_REM                -1.171e-02  6.031e-04  1.810e+02 -19.419  < 2e-16 ***
groupdepressed            6.612e-05  7.050e-04  3.464e+02   0.094   0.9253    
groupremitted            -1.070e-04  6.123e-04  3.464e+02  -0.175   0.8614    
modPA_REM:groupdepressed -3.565e-03  8.848e-04  1.810e+02  -4.029 8.23e-05 ***
modPA_REM:groupremitted  -1.531e-03  7.684e-04  1.810e+02  -1.992   0.0479 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
              (Intr) mdPA_REM grpdpr grprmt mdPA_REM:grpd
modPA_REM     -0.627                                     
groupdprssd   -0.682  0.428                              
groupremttd   -0.785  0.492    0.535                     
mdPA_REM:grpd  0.428 -0.682   -0.627 -0.336              
mdPA_REM:grpr  0.492 -0.785   -0.336 -0.627  0.535       

7 Plots

First we set our theme and fix the data for the figures we want

# Theme
# Shared ggplot theme for all publication figures.
ggtheme = theme(text = element_text(size = 8), 
                panel.background = element_rect(fill = "transparent"), 
                panel.grid.major = element_line(color = "grey90"),
                panel.grid.minor = element_line(color = "grey100"),
                panel.border = element_rect(color = "grey80", fill = "transparent"))

# Estimate the SD
# Bin lagged current mood into -1SD / Mean / +1SD bands for plotting the
# three-way interaction. Note the mean and SD are computed over the whole
# sample (not per subject or per group).
df_medal_clean2 = df_medal_clean %>% 
  mutate(pos_mood_sd = ifelse( (pos_mood_cs_lag_rec > (mean(pos_mood_cs_lag_rec, na.rm=T)+sd(pos_mood_cs_lag_rec, na.rm=T))), "+1SD",
                               ifelse( (pos_mood_cs_lag_rec < (mean(pos_mood_cs_lag_rec, na.rm=T)-sd(pos_mood_cs_lag_rec, na.rm =T))), "-1SD", "Mean"))) %>% 
    mutate(neg_mood_sd = ifelse( (neg_mood_cs_lag_rec > (mean(neg_mood_cs_lag_rec, na.rm=T)+sd(neg_mood_cs_lag_rec, na.rm=T))), "+1SD",
                               ifelse( (neg_mood_cs_lag_rec < (mean(neg_mood_cs_lag_rec, na.rm=T)-sd(neg_mood_cs_lag_rec, na.rm =T))), "-1SD", "Mean")))

# Fix factor levels
# Order the facet/legend levels for display.
df_medal_clean2$Condition =  factor(df_medal_clean2$condition, levels=c('control', 'remitted', 'depressed' ), labels=c('Never-Depressed', 'Remitted', 'Depressed'))
df_medal_clean2$pos_mood_sd =  factor(df_medal_clean2$pos_mood_sd, levels=c('-1SD', 'Mean', '+1SD' ))
df_medal_clean2$neg_mood_sd =  factor(df_medal_clean2$neg_mood_sd, levels=c('-1SD', 'Mean', '+1SD' ))

7.0.1 PA-CUR-REC


# Plot
# Linear fits of current PA on recent PA recall, one line per current-mood
# band (-1SD / Mean / +1SD), faceted by group.
plot_pa_rec = ggplot(df_medal_clean2, aes(x = rec_mem_pos_mood_c_lag, y=pos_mood_c, color = pos_mood_sd, fill = pos_mood_sd)) +
  geom_smooth(method = 'lm', alpha = 0.15) +
  labs( x = bquote(paste(REC[PM], " (subject-centered a.u.)")), y = "PM (subject-centered a.u.)", color = bquote(CUR[PM]), fill = bquote(CUR[PM]) ) +
  ggtheme + 
  facet_grid(.~Condition) 
plot_pa_rec
`geom_smooth()` using formula = 'y ~ x'
Warning: Removed 4237 rows containing non-finite values (`stat_smooth()`).

7.0.2 NA-CUR-REC


# Plot
# Same figure for negative mood: current NA on recent NA recall, by
# current-mood band and group.
plot_na_rec = 
  ggplot(df_medal_clean2, aes(x = rec_mem_neg_mood_c_lag, y=neg_mood_c, color = neg_mood_sd, fill = neg_mood_sd)) +
  geom_smooth(method = 'lm', alpha = 0.15) +
  labs( x = bquote(paste(REC['NM'], " (subject-centered a.u.)")), y = "NM (subject-centered a.u.)", color = bquote(CUR["NM"]), fill = bquote(CUR["NM"]) )+
  ggtheme + 
  facet_grid(.~Condition) 
plot_na_rec
`geom_smooth()` using formula = 'y ~ x'
Warning: Removed 4239 rows containing non-finite values (`stat_smooth()`).

7.0.3 Combined Plot for pub

# Combine the two panel plots with thin spacer columns on the left of each row.
# FIX(review): use NULL (not NA) for the empty slots -- ggarrange() cannot
# convert a logical NA into a grob (this produced the "Cannot convert object of
# class logical into a grob" warnings) -- and "" (not NA) for the unused label
# position, which otherwise triggers a geom_text missing-values warning.
ggarrange(NULL, plot_pa_rec, NULL,  plot_na_rec,
          widths=c(0.05, 1, 0.05, 1), ncol = 2, nrow = 2,
          labels=c("A", "", "B"))
Warning in as_grob.default(plot) :
  Cannot convert object of class logical into a grob.
`geom_smooth()` using formula = 'y ~ x'
Warning: Removed 4237 rows containing non-finite values (`stat_smooth()`).
Warning in as_grob.default(plot) :
  Cannot convert object of class logical into a grob.
`geom_smooth()` using formula = 'y ~ x'
Warning: Removed 4239 rows containing non-finite values (`stat_smooth()`).
Warning: Removed 1 rows containing missing values (`geom_text()`).
ggsave("figures/figure_2_threewayInteraction.pdf", device = 'pdf', dpi = 320, height = 6)
Saving 7.29 x 6 in image
Warning: Removed 1 rows containing missing values (`geom_text()`).

8 System Info

sessionInfo()
R version 4.1.1 (2021-08-10)
Platform: x86_64-w64-mingw32/x64 (64-bit)
Running under: Windows Server x64 (build 17763)

Matrix products: default

locale:
[1] LC_COLLATE=English_United States.1252  LC_CTYPE=English_United States.1252    LC_MONETARY=English_United States.1252
[4] LC_NUMERIC=C                           LC_TIME=English_United States.1252    

attached base packages:
[1] parallel  stats     graphics  grDevices datasets  utils     methods   base     

other attached packages:
 [1] haven_2.5.2        wesanderson_0.3.6  JSmediation_0.2.1  jtools_2.2.1       plotly_4.10.1      mediation_4.5.0    sandwich_3.0-2    
 [8] mvtnorm_1.1-3      MASS_7.3-54        ggpubr_0.6.0       doParallel_1.0.17  iterators_1.0.14   foreach_1.5.2      knitr_1.42        
[15] sjPlot_2.8.14      tidyr_1.3.0        DescTools_0.99.48  performance_0.10.3 lmerTest_3.1-3     lme4_1.1-31        Matrix_1.5-4.1    
[22] cli_3.6.1          pastecs_1.3.21     devtools_2.4.5     usethis_2.1.6      psych_2.2.9        ggplot2_3.4.2      data.table_1.14.4 
[29] readxl_1.4.1       janitor_2.1.0      dplyr_1.1.2       

loaded via a namespace (and not attached):
  [1] utf8_1.2.2              tidyselect_1.2.0        htmlwidgets_1.6.2       grid_4.1.1              lpSolve_5.6.18         
  [6] munsell_0.5.0           effectsize_0.8.3        codetools_0.2-18        ragg_1.2.5              miniUI_0.1.1.1         
 [11] withr_2.5.0             colorspace_2.0-3        uuid_1.1-0              rstudioapi_0.14         ggsignif_0.6.4         
 [16] officer_0.6.2           fontLiberation_0.1.0    labeling_0.4.2          emmeans_1.8.6           mnormt_2.1.1           
 [21] farver_2.1.1            datawizard_0.7.1        coda_0.19-4             vctrs_0.6.1             generics_0.1.3         
 [26] xfun_0.39               fontquiver_0.2.1        R6_2.5.1                BayesFactor_0.9.12-4.4  cachem_1.0.6           
 [31] promises_1.2.0.1        scales_1.2.1            nnet_7.3-16             rootSolve_1.8.2.4       gtable_0.3.3           
 [36] processx_3.8.0          lmom_3.0                rempsyc_0.1.2           MatrixModels_0.5-1      rlang_1.1.0            
 [41] zeallot_0.1.0           systemfonts_1.0.4       splines_4.1.1           rstatix_0.7.2           lazyeval_0.2.2         
 [46] broom_1.0.4             checkmate_2.2.0         yaml_2.3.6              abind_1.4-5             modelr_0.1.11          
 [51] crosstalk_1.2.0         backports_1.4.1         httpuv_1.6.6            Hmisc_5.1-0             tools_4.1.1            
 [56] ellipsis_0.3.2          proxy_0.4-27            sessioninfo_1.2.2       Rcpp_1.0.9              base64enc_0.1-3        
 [61] purrr_1.0.1             ps_1.7.2                prettyunits_1.1.1       rpart_4.1-15            openssl_2.0.6          
 [66] pbapply_1.7-0           cowplot_1.1.1           correlation_0.8.4       urlchecker_1.0.1        zoo_1.8-12             
 [71] cluster_2.1.2           fs_1.5.2                crul_1.4.0              magrittr_2.0.3          flextable_0.9.1        
 [76] sjmisc_2.8.9            pkgload_1.3.2           hms_1.1.3               patchwork_1.1.2         mime_0.12              
 [81] evaluate_0.17           xtable_1.8-4            pbkrtest_0.5.2          sjstats_0.18.2          gridExtra_2.3          
 [86] ggeffects_1.2.2         compiler_4.1.1          fontBitstreamVera_0.1.1 tibble_3.2.1            ggstatsplot_0.11.1     
 [91] crayon_1.5.2            minqa_1.2.5             htmltools_0.5.5         mgcv_1.8-36             tzdb_0.4.0             
 [96] later_1.3.0             Formula_1.2-5           expm_0.999-9            Exact_3.2               lubridate_1.8.0        
[101] sjlabelled_1.2.0        boot_1.3-28             readr_2.1.4             car_3.1-2               insight_0.19.2         
[106] forcats_1.0.0           pkgconfig_2.0.3         statsExpressions_1.5.1  numDeriv_2016.8-1.1     foreign_0.8-81         
[111] xml2_1.3.3              paletteer_1.5.0         estimability_1.4.1      snakecase_0.11.0        stringr_1.5.0          
[116] callr_3.7.3             digest_0.6.29           parameters_0.21.0       httpcode_0.3.0          rmarkdown_2.21         
[121] cellranger_1.1.0        htmlTable_2.4.1         gld_2.6.6               gdtools_0.3.3           curl_5.0.0             
[126] shiny_1.7.4             nloptr_2.0.3            lifecycle_1.0.3         nlme_3.1-152            jsonlite_1.8.3         
[131] carData_3.0-5           viridisLite_0.4.2       askpass_1.1             fansi_1.0.3             pillar_1.9.0           
[136] lattice_0.20-44         fastmap_1.1.0           httr_1.4.6              pkgbuild_1.4.0          glue_1.6.2             
[141] remotes_2.4.2           zip_2.3.0               bayestestR_0.13.1       pander_0.6.5            class_7.3-19           
[146] stringi_1.7.8           profvis_0.3.7           rematch2_2.1.2          textshaping_0.3.6       gfonts_0.2.0           
[151] memoise_2.0.1           renv_0.15.5             e1071_1.7-13           
---
title: 'Manifestations of memory bias in daily life in currently and remitted depressed individuals: How accurate is recall of past mood states?'
author: Rayyan Tutunji, Noa Magusin, Nessa Ikani, Janna Vrijsen
date: "`r Sys.Date()`"
output:
  html_notebook:
    fig_width: 8
    fig_height: 8
    toc: yes
    toc_float: yes
    number_sections: yes
    theme: flatly
  html_document:
    df_print: paged
    toc: yes
    toc_float: yes
    number_sections: yes
    theme: flatly
---

# Introduction

This notebook contains analyses from the MEDAL study, assessing memory bias in remitted and depressed individuals. The pre-registration for the main analyses, as well as a preprint version of the accompanying article can be found [here](https://osf.io/zufjs/) as well. The main aim of the current work was to examine how three groups (control, remitted, depressed) differ in terms of emotional memory dynamics in a real-life setting.

```{r message=TRUE, warning=FALSE}
#Load packages 
renv::activate()
library(dplyr)
library(janitor)
library(readxl)
library(data.table)
library(ggplot2)
library(psych)
library(devtools)
library(pastecs)
library(cli)
library(lmerTest) #linear mixed models 
library(performance) #fits of residuals 
library(DescTools)
library(tidyr) # Separate trigger column
library(sjPlot) # for nice tables and interaction plots 
library(knitr) # to print the tables within notebook
library(foreach) #run parallel loops
library(parallel)
library(doParallel) #run parallel loops
library(ggpubr) # emmeans
library(mediation) #mediation analysis 
library(plotly) #for interactive plots
library(jtools) #for theme_apa
library(JSmediation) #moderated mediation
library(wesanderson) 
source("functions.R")
```

# Data

Here we load in our data and preprocess it in steps. We first need to separate the different surveys that were delivered to bring them into the same sampling times, merge in some descriptives we need as covariates, and estimate subject-centered and lagged variables for our models later. 


1. Load and clean data

```{r}
# Read the raw EMA export, standardize column names, and order the id columns.
MEDAL_ESM_compleet <- read_excel("data/MEDAL_ESM_compleet.xlsx") %>%
  rename(id = "Movisens-ID") %>%     # rename before clean_names(), as in the export
  clean_names() %>%
  rename(subjectcode = "deelnemersnr") %>%
  relocate("subjectcode", .after = "id")

# Working copy used by the survey-splitting steps below.
df_medal_try <- MEDAL_ESM_compleet
```

2. Split different EMA surveys we have

```{r}
# Split the long export into one data frame per survey form, keeping the shared
# bookkeeping columns by position (1, 2, 4, 5, 7, 9 -- presumably id, trigger
# counter, and timestamps; TODO confirm against the export layout) plus the
# form-specific items selected by name pattern.
# Sleep EMA Qs
df_medal_sleep = df_medal_try %>% 
  filter(form=='Slaap') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, 14)) 
# Recent EMA Qs
df_medal_recent = df_medal_try %>% 
  filter(form=='Recent') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('rec')))
# Remote EMA Qs
df_medal_remote = df_medal_try %>%
  filter(form=='Remote') %>%
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('rem')))
# Mastery EMA Qs
df_medal_mastery = df_medal_try %>%
  filter(form=='Mastery') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('master')))
# Standard Qs: everything except the other forms' items; the trailing positional
# deselect drops leftover duplicated columns -- NOTE(review): indices 7, 13, 14
# depend on the current column layout; confirm after any upstream change.
df_medal_standard = df_medal_try %>%
  filter(form=='Standaard')  %>%
  dplyr::select(c(1, 2, 4, 5, 7, 9, (-contains('mastery') & -contains('rem') & -contains('rec')))) %>% 
  dplyr::select(-c(7,13,14))

# merge sleep and standard surveys on the shared keys; all = T keeps rows that
# appear in only one of the two forms
df_medal_merged = merge(df_medal_sleep, df_medal_standard, by=c('id', 'trigger_counter', 'subjectcode'), suffixes = c("_sleep", ""), all=T )

# merge to recent, remote and mastery
df_medal_merged= merge(df_medal_merged, df_medal_recent, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_recent'), all = T )
df_medal_merged = merge(df_medal_merged, df_medal_remote, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_remote'), all = T )
df_medal_merged = merge(df_medal_merged, df_medal_mastery, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_mastery'), all = T )

# Fix start dates and times
# NOTE(review): despite the "_date" name this column is converted to a
# data.table ITime (time-of-day) -- confirm that only the clock time is needed.
df_medal_merged$form_start_date <- as.character(df_medal_merged$form_start_date)
df_medal_merged$form_start_date <- as.ITime(df_medal_merged$form_start_date)
# Backfill missing start times from the sleep form's start time.
df_medal_merged = df_medal_merged %>% 
  dplyr::mutate(form_start_date = ifelse(is.na(form_start_date), form_start_date_sleep, form_start_date))
```

3. Add variables on Education, Gender, and Age

```{r}
# Load in previous data for demographics (also contains EMA)
load("data/IPAQ_Maeve_Workspace.RData")
# NOTE(review): row 192 is dropped by position -- presumably a known bad record;
# confirm this index is still correct if the workspace is regenerated.
df_MEDAL <- df_MEDAL[-c(192), ]

# Keep subject id plus the demographic covariates, renamed to analysis names.
df_MEDAL <- df_MEDAL %>%
  dplyr::select(1, 3, 5:7) %>%
  rename('subjectcode' = 'subject', 'education' = 'Education3groups', 'n_episodes' = 'NumberEpisodes')

# Merge demographics onto the full EMA data (all = TRUE keeps unmatched rows).
df_medal_merged <- merge(df_medal_merged, df_MEDAL, by = c('subjectcode'), all = TRUE) %>%
  clean_names()

df_medal_merged$education <- as.factor(df_medal_merged$education)
df_medal_merged$gender <- as.factor(df_medal_merged$gender)

# Final column order: id, subjectcode, gender, age, education, <rest>.
# The original chained four relocate() calls; the middle two were no-ops
# (age was already after gender, education already after age), so only the
# two effective moves remain.
df_medal_merged <- df_medal_merged %>%
  relocate(c('id', 'gender', 'age', 'education'), .after = 'subjectcode') %>%
  relocate('id', .before = 'subjectcode')
```

4. Categorize the subjectcode into the three conditions (Depressed, Remitted, Control (Never-depressed))

```{r}
# Subject codes encode group membership: 7xx = remitted, 8xx = depressed,
# 900+ = control. Codes below 700 (if any) fall through to NA.
df_medal_merged <- df_medal_merged %>%
  mutate(condition = case_when(
    subjectcode >= 700 & subjectcode < 800 ~ "remitted",
    subjectcode >= 800 & subjectcode < 900 ~ "depressed",
    subjectcode >= 900                     ~ "control"
  )) %>%
  relocate('condition', .after = 'subjectcode') %>%
  mutate(condition = as.factor(condition))
```

5. Calculate reaction time 

```{r}
df_medal_merged$rt_sleep = lubridate::as.difftime( df_medal_merged$form_finish_date_sleep - df_medal_merged$form_start_date_sleep)
```

6. Separate time from when trigger was sent

```{r}

df_medal_merged = df_medal_merged %>% 
  tidyr::separate(trigger, c('Random', 'Time', 'Trigger_Time_1', 'Trigger_Time_2'))  %>% 
  dplyr::mutate(trigger_time = paste(Trigger_Time_1, Trigger_Time_2, sep=':')) %>%#separate time from previous trigger
  dplyr::mutate(trigger = paste(Random, Time, trigger_time, sep=' ')) #bring back 'trigger' column 

#Turn numbers into time format
df_medal_merged$trigger_time <- as.ITime(df_medal_merged$trigger_time)
```

7. Find the trigger number per day

```{r}
#df_medal_merged$trigger_counter[is.na(df_medal_merged$trigger_time)] <- 1

# Assign the within-day beep slot (1-7) from two-hour trigger-time bins.
# Rows without a trigger time (presumably the morning/sleep survey -- confirm)
# get slot 1. Times outside all bins (before 10:00) leave trigger_number NA.
# Note: these single-value logical subassignments tolerate NA comparisons
# (NA index positions are simply left unchanged).
df_medal_merged$trigger_number[is.na(df_medal_merged$trigger_time)] <- 1 
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("10:00:00"))& df_medal_merged$trigger_time < (as.ITime("12:00:00"))] <- 2
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("12:00:00"))& df_medal_merged$trigger_time < (as.ITime("14:00:00"))] <- 3
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("14:00:00"))& df_medal_merged$trigger_time < (as.ITime("16:00:00"))] <- 4
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("16:00:00"))& df_medal_merged$trigger_time < (as.ITime("18:00:00"))] <- 5
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("18:00:00"))& df_medal_merged$trigger_time < (as.ITime("20:00:00"))] <- 6
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("20:00:00"))& df_medal_merged$trigger_time < (as.ITime("23:00:01"))] <- 7

# Put the new columns next to their related bookkeeping columns.
df_medal_merged = df_medal_merged %>% 
  relocate ('trigger_number', .after = 'trigger_counter')%>% relocate('trigger_time', .before ='form_start_date') %>%
  relocate('trigger', .before ='trigger_time')
```

8. Final cleaning of the dataset
```{r}

#Create dataset where double morning list is removed 
# Each pass collapses one layer of consecutive duplicate morning surveys
# (a trigger_number == 1 row directly after another) by copying the previous
# row's trigger_counter; five identical passes propagate through runs of up to
# six duplicates.
# NOTE(review): lag() relies on the current row order and is NOT grouped by
# subject; also, lag() is NA on the very first row, so ifelse() would yield an
# NA trigger_counter if that row were a morning survey -- confirm this cannot
# occur in the data.
df_medal_merged = df_medal_merged %>% 
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>% 
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>%
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>%
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>% 
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter))

# Now we have the clean dataframe: keep one row per trigger within each subject
# and drop the duplicated merge columns by position.
# NOTE(review): the positional select (1:12, 17:60) is tied to the current
# column layout -- re-check after any upstream change.
df_medal_clean = df_medal_merged %>%
  dplyr::group_by(subjectcode) %>% 
  distinct(trigger_counter, .keep_all = TRUE) %>% 
  dplyr::select (1:12, 17:60) %>% 
  dplyr::mutate(original_order = row_number()) %>% 
  relocate('original_order', .before = 'id')

# Put the condition factor in display order (control < remitted < depressed).
df_medal_clean$condition <- factor(df_medal_clean$condition,
                                   levels = c('control', 'remitted', 'depressed'),
                                   labels = c('control', 'remitted', 'depressed'))

# Numeric dummy coding mirroring the factor order:
# 1 = control, 2 = remitted, 3 = depressed.
df_medal_clean <- df_medal_clean %>%
  mutate(condition_dummy = case_when(
    condition == 'control'   ~ 1,
    condition == 'remitted'  ~ 2,
    condition == 'depressed' ~ 3
  ))
df_medal_clean$condition_dummy <- as.factor(df_medal_clean$condition_dummy)

#set weekday 
# Parse the finish timestamp. NOTE(review): strptime() returns POSIXlt, which
# is stored in the data frame here -- it works for the uses below, but POSIXct
# is the conventional column type; confirm nothing downstream needs POSIXct.
df_medal_clean$form_finish_date <- as.character(df_medal_clean$form_finish_date)
df_medal_clean$form_finish_date <- strptime(df_medal_clean$form_finish_date, format = '%Y-%m-%d %H:%M:%S')

# Day of week as an integer 0-6 (Sunday = 0, per strftime '%w').
df_medal_clean$finish_date <- as.Date(df_medal_clean$form_finish_date)
df_medal_clean$weekday <- as.integer(format(df_medal_clean$finish_date, '%w'))
df_medal_clean = df_medal_clean %>% relocate('weekday', .after = 'trigger_number')

# Solve remaining duplicate-beep issues within subject x day.
# Diagnostic table of duplicated trigger slots (kept for inspection).
df_medal_duplicate = df_medal_clean %>%
  dplyr::group_by(subjectcode, weekday) %>%
  filter(duplicated(trigger_number))

# Row 10 for subject 712 is a known duplicate morning list -- drop it.
df_medal_clean <- subset(df_medal_clean, !(subjectcode == 712 & original_order == 10))

# Where two consecutive rows share a trigger slot, shift the first one back.
df_medal_clean = df_medal_clean %>%
  dplyr::group_by(subjectcode, weekday) %>%
  mutate(temp = lead(trigger_number),
         trigger_number = case_when(trigger_number == temp ~ (temp - 1), TRUE ~ trigger_number)) %>%
  select(-temp)

# Drop rows with NA weekday: the form was never finished, so there is no data
# for PA, NA, or memory.
df_medal_clean = df_medal_clean %>% filter(!is.na(weekday))

# Renumber rows; seq_len() is safe even if the frame ends up empty
# (1:nrow() would yield c(1, 0) for zero rows).
df_medal_clean$original_order <- seq_len(nrow(df_medal_clean))

# Build a scaffold with one row per subject x day x beep slot (7 per day) so
# that missed beeps appear as explicit NA rows after the merge below.
df_medal_day <- df_medal_clean %>%
  dplyr::select('subjectcode', 'weekday', 'original_order') %>%
  dplyr::mutate(weekday = as.numeric(weekday)) %>%
  dplyr::group_by(subjectcode) %>%
  dplyr::distinct(weekday, .keep_all = TRUE) %>%
  dplyr::ungroup() %>%
  # replicate each subject-day 7 times and number the slots 1..7
  slice(rep(1:n(), each = 7)) %>%
  dplyr::mutate(trigger_number = rep(1:7, length.out = n())) %>%
  dplyr::rename(order = original_order)

# Merge the scaffold with the observed data; unanswered slots become NA rows.
df_medal_clean <- merge(df_medal_day, df_medal_clean,
                        by = c('subjectcode', 'trigger_number', 'weekday'), all = TRUE) %>%
  mutate(order = as.numeric(order)) %>%
  arrange(order)
```

9. Centre, scale, and average Variables 

```{r warning=FALSE}

# Mood and memory variables to rescale, person-mean center, and winsorize.
used_vars = c("neg_mood", "rec_mem_neg_mood", "rem_mem_neg_mood", "pos_mood", "rec_mem_pos_mood", "rem_mem_pos_mood")

# Within each subject: divide raw scores by 10, compute person means (_m),
# person-mean-centered scores (_c), winsorize the centered scores in place,
# then shift each centered variable to be strictly positive (_cs) so that
# Gamma/log models can be fitted later.
# all_of() wraps the external character vector: bare vectors inside across()
# selections are deprecated in tidyselect and ambiguous with column names.
df_medal_clean = df_medal_clean %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(across(all_of(used_vars), ~ (.x/10), .names = "{.col}" ),
                across(all_of(used_vars), ~ mean(.x, na.rm=T), .names = "{.col}_m" ),
                across(all_of(used_vars), ~ (.x - mean(.x, na.rm=T)), .names = "{.col}_c" ), 
                across(all_of(paste0(used_vars, "_c")), ~ DescTools::Winsorize(.x, na.rm = T), .names = "{.col}")) %>%  
  ungroup() %>%
  # Rescale to positive: shift so the minimum becomes exactly 1.
  mutate(across(all_of(paste0(used_vars,"_c")), ~ abs(min(.x, na.rm = T)) + 1 + .x, .names = "{.col}s"))

```


10. Lag variables 
```{r}
# Remote 

# "Lagged" remote-memory predictors: lead(n = 7) pairs each beep with the same
# slot on the following day (the expanded grid has exactly 7 rows per day).
# NOTE(review): order_by = subjectcode is constant within each group, so row
# order alone determines the pairing; and despite the "_lag" names these are
# built with lead() -- presumably recall at day t+1 refers back to mood at
# day t. Confirm both against the pre-registration.
df_medal_clean = df_medal_clean %>%
  dplyr::group_by(subjectcode) %>%
  mutate(rem_mem_pos_mood_lag = lead(rem_mem_pos_mood, n=7, order_by=subjectcode),
         rem_mem_neg_mood_lag = lead(rem_mem_neg_mood, n=7, order_by=subjectcode),
         pos_mood_lag_rem = lead(pos_mood, n=7, order_by=subjectcode),
         neg_mood_lag_rem = lead(neg_mood, n=7, order_by=subjectcode), 
         rem_mem_pos_mood_cs_lag = lead(rem_mem_pos_mood_cs, n=7, order_by=subjectcode),
         rem_mem_neg_mood_cs_lag = lead(rem_mem_neg_mood_cs, n=7, order_by=subjectcode),
         pos_mood_cs_lag_rem = lead(pos_mood_cs, n=7, order_by=subjectcode),
         neg_mood_cs_lag_rem = lead(neg_mood_cs, n=7, order_by=subjectcode),
         rem_mem_pos_mood_c_lag = lead(rem_mem_pos_mood_c, n=7, order_by=subjectcode),
         rem_mem_neg_mood_c_lag = lead(rem_mem_neg_mood_c, n=7, order_by=subjectcode),
         pos_mood_c_lag_rem = lead(pos_mood_c, n=7, order_by=subjectcode),
         neg_mood_c_lag_rem = lead(neg_mood_c, n=7, order_by=subjectcode)) %>% 
  filter(!is.na(original_order)) # filter out the NA rows for the lagging of recent memory 

# Recent: next-beep pairing (n = 1), computed after the empty grid rows above
# have been dropped.
df_medal_clean = df_medal_clean %>% 
  dplyr::group_by(subjectcode) %>%
  mutate(rec_mem_pos_mood_lag = lead(rec_mem_pos_mood, n=1, order_by=subjectcode),
         rec_mem_neg_mood_lag = lead(rec_mem_neg_mood, n=1, order_by=subjectcode),
         pos_mood_lag_rec = lead(pos_mood, n=1, order_by=subjectcode),
         neg_mood_lag_rec = lead(neg_mood, n=1, order_by=subjectcode),
         rec_mem_pos_mood_cs_lag = lead(rec_mem_pos_mood_cs, n=1, order_by=subjectcode),
         rec_mem_neg_mood_cs_lag = lead(rec_mem_neg_mood_cs, n=1, order_by=subjectcode),
         pos_mood_cs_lag_rec = lead(pos_mood_cs, n=1, order_by=subjectcode),
         neg_mood_cs_lag_rec = lead(neg_mood_cs, n=1, order_by=subjectcode),
         rec_mem_pos_mood_c_lag = lead(rec_mem_pos_mood_c, n=1, order_by=subjectcode),
         rec_mem_neg_mood_c_lag = lead(rec_mem_neg_mood_c, n=1, order_by=subjectcode),
         pos_mood_c_lag_rec = lead(pos_mood_c, n=1, order_by=subjectcode),
         neg_mood_c_lag_rec = lead(neg_mood_c, n=1, order_by=subjectcode)) %>% 
  ungroup()
  

```

# Descriptives {-}

We first plot some general population descriptives, followed by some descriptives of the scales we use in the EMA weeks and compliance rates for both positive and negative mood.

## Population {- .tabset}

### Demographics Table {-}
```{r fig.height=12, fig.width=12}
# Demographic summary per condition (one row per participant).
summary_table = df_medal_clean %>%
  distinct(subjectcode, .keep_all = TRUE) %>%
  group_by(condition) %>%
  summarise(
    'N' = n(),
    'Age \n M +/- SD' = paste0(round(mean(age, na.rm = T), 1), ' +/- ', round(sd(age, na.rm = T), 1)),
    #'NA Age' = sum(is.na(age)),
    '% Education Low' = mean(education == 0, na.rm = T) * 100,
    '% Education Middle' = mean(education == 1, na.rm = T) * 100,
    '% Education High' = mean(education == 2, na.rm = T) * 100,
    #'NA Education' = sum(is.na(education)),
    '% Female' = mean(gender == 1, na.rm = T) * 100
    #'NA Gender' = sum(is.na(gender))
  )
# (Trailing comma after the last argument removed: it only worked by grace of
# rlang's dots and is fragile.)

# Per-participant compliance: answered beeps out of the 42 scheduled prompts.
df_medal_compliance_individual = df_medal_clean %>%
  group_by(condition, subjectcode) %>%
  summarise('ComplianceRate' = sum(!is.na(trigger_number)) / 42 * 100)

# Number of participants below 70% compliance (printed for reporting);
# sum() over the logical vector replaces length(which(...)).
sum(df_medal_compliance_individual$ComplianceRate < 70, na.rm = TRUE)

# Mean compliance per condition.
df_medal_compliance = df_medal_clean %>%
  group_by(condition) %>%
  summarise('% Mean Compliance' = sum(!is.na(trigger_number)) / (n_distinct(subjectcode) * 42) * 100)

# Join compliance onto the demographics and render the table.
summary_table = summary_table %>% left_join(df_medal_compliance, by = 'condition')
rempsyc::nice_table(summary_table)
```

### Tests {-}
```{r}
# One row per participant, keeping only the demographic test variables.
df_medal_distinct = df_medal_clean %>%
  distinct(subjectcode, .keep_all = TRUE) %>%
  select('subjectcode', 'condition', 'age', 'education', 'gender')

# Age: between-group comparison via a linear model.
summary(lm(age ~ condition, data = df_medal_distinct))

# Education: chi-squared test of independence with condition.
chisq.test(df_medal_distinct$education, df_medal_distinct$condition)

# Gender: chi-squared test of independence with condition.
chisq.test(df_medal_distinct$gender, df_medal_distinct$condition)
```

### Plot {-}
```{r}
#Create a Graph  
  #age: boxplot of age per condition
  boxplot_age <- ggplot(df_medal_distinct, aes(x=condition, y=age, fill=condition)) +
    geom_boxplot() +
    labs(x = 'Condition', y = 'Age', tag = 'C' ) +
    ggtitle('Age per Condition') +
    scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
    theme_apa() 
  ggplotly(boxplot_age)
    
    #education: grouped bar chart
    # NOTE(review): the Low/Middle/High labels assume factor levels 0 < 1 < 2
    # in that order -- confirm.
    plot_educ <- ggplot(df_medal_distinct, aes(x=condition, fill = education)) +
      geom_bar(position = 'dodge') +
      labs(x='Condition', y= 'Proportion', tag = 'A' ) +
      ggtitle('Education Level per Condition') +
      scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Low' , 'Middle' , 'High'), name = 'Education')  +
      theme_apa() 
    ggplotly(plot_educ)
      
    #Gender: grouped bar chart
    # NOTE(review): Male/Female labels assume gender levels in that order -- confirm.
    plot_gender <- ggplot(df_medal_distinct, aes(x=condition, fill = gender)) +
      geom_bar(position = 'dodge') +
      labs(x = 'Condition', y = 'Proportion', tag = 'B' ) +
      ggtitle('Gender per Condition') +
      scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Male', 'Female'), 'Gender')  +
      theme_apa() 
    ggplotly(plot_gender)
      
     # Alternative gender plot with proportions and test annotation (ggstatsplot).
     plot_gender_2 <-  ggstatsplot::ggbarstats(data = df_medal_distinct, x = gender, y = condition) +
        labs(x = 'Condition', y = 'Proportion', tag = 'B' ) +
        ggtitle('Gender per Condition') +
        scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Male', 'Female'), 'Gender')  +
        theme_apa() 
      ggplotly(plot_gender_2)

# Combined panel of the three demographic plots.
ggpubr::ggarrange(plot_educ, plot_gender, boxplot_age, nrow=2, ncol=2)    

# Subset by compliance rates
# NOTE(review): the retention threshold here is 60%, while the count reported
# above used 70% -- confirm this difference is intended.
compliant_subs = df_medal_compliance_individual %>% filter(ComplianceRate >= 60) 
df_medal_clean = df_medal_clean[df_medal_clean$subjectcode %in% compliant_subs$subjectcode ,]
    
```

## Positive Mood  {.tabset -}

### Current Mood {-}
```{r}
describe.by(df_medal_clean$pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution

```{r}
ggplot(data= df_medal_clean, aes(x=pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
``` 

Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
```


### Recent Memory {-}

```{r}
describe.by(df_medal_clean$rec_mem_pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution
```{r}
ggplot(data= df_medal_clean, aes(x=rec_mem_pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
```

Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=rec_mem_pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
```


### Remote Memory {-}

```{r}
describe.by(df_medal_clean$rem_mem_pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution

```{r}
ggplot(data= df_medal_clean, aes(x=rem_mem_pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
```

Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=rem_mem_pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
```


## Negative Mood {- .tabset}

### Current Mood {-}
```{r}
# Per-condition descriptives of current negative mood.
# describeBy() replaces the deprecated describe.by() alias from psych.
describeBy(df_medal_clean$neg_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution

```{r}
ggplot(data= df_medal_clean, aes(x=neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
```
Centred Distribution 
```{r}
# Centered (_c) and positive-shifted (_cs) negative-mood distributions per condition.
df_medal_clean %>% ggplot(aes(x = neg_mood_c)) + geom_histogram() + facet_grid(cols = vars(condition))
df_medal_clean %>% ggplot(aes(x = neg_mood_cs)) + geom_histogram() + facet_grid(cols = vars(condition))
```


### Recent Memory {-}

```{r}
describe.by(df_medal_clean$rec_mem_neg_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution

```{r}
ggplot(data= df_medal_clean, aes(x=rec_mem_neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
```
Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=rec_mem_neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
```


### Remote Memory {-}

```{r}
describe.by(df_medal_clean$rem_mem_neg_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution

```{r}
ggplot(data= df_medal_clean, aes(x=rem_mem_neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
```
Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=rem_mem_neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
```


# Main Effects {.tabset}
Linear mixed models with a random intercept per participant were fitted to examine differences in positive and negative mood ratings per condition.

## Linear Mixed Models for PA and NA
```{r}
# Random-intercept mixed models testing group differences in mean mood levels.
positive_model <- lmer(pos_mood ~ condition + (1 | subjectcode), data = df_medal_clean)
negative_model <- lmer(neg_mood ~ condition + (1 | subjectcode), data = df_medal_clean)

# Render both models side by side as one HTML table in the notebook.
asis_output(
  tab_model(positive_model, negative_model,
            show.se = TRUE, show.df = TRUE, show.aic = TRUE, transform = NULL,
            show.stat = TRUE, show.std = TRUE,
            title = 'Condition Differences Mood Rating',
            dv.labels = c('PA Scaled', 'NA Scaled'))$knitr
)
```

## Plot {-}
```{r message=FALSE, warning=FALSE}

#create boxplot for negative and positive affect for each group with significance levels 
combine_data = df_medal_clean %>%
  tidyr::pivot_longer(cols=c(pos_mood, neg_mood), names_to = 'mood_type', values_to = 'mood_rating') %>% 
  dplyr::mutate(mood_type = factor(mood_type, levels = c('pos_mood' , 'neg_mood')))

# Plot
lm_plot = ggplot(combine_data, aes(x=condition, y = mood_rating, fill= condition)) + 
  geom_boxplot() +
  labs(x ='Group', y ='Mood (scaled units)' ) +
  scale_x_discrete(labels = c('control' = 'Never Depressed', 'remitted' = 'Remitted', 'depressed' = 'Depressed' )) +
  facet_wrap( ~ mood_type, ncol =3, nrow = 2, labeller  = labeller(mood_type = c('pos_mood' = 'Positive Mood', 'neg_mood'  = 'Negative Mood'))) +
  geom_signif(comparisons = list(c('control', 'remitted'), c('control' , 'depressed'), c('remitted' , 'depressed' )), map_signif_level = T, y_position = c(15, 13, 11)) +
  scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  theme_apa() + 
  guides(fill=F)
ggplotly(lm_plot)
ggsave('figure_1_main_effects.pdf', dpi = 320, width = 12, height = 8, path = "figures/")
```

## Follow-Up {-}
```{r}
# Pairwise group contrasts on estimated marginal means for both models.
# The limits are raised so emmeans does not refuse the large sample size.
emmeans::emmeans(positive_model, pairwise ~ condition, pbkrtest.limit = 7800, lmerTest.limit=7800)
emmeans::emmeans(negative_model, pairwise ~ condition, pbkrtest.limit = 7800, lmerTest.limit=7800)
```



# Hypothesis 1: Independent Models  

In this section we test the first models from hypothesis 1 as stated in the pre-registration. These analyses look at the relationship between recent and remote emotional memory for both positive and negative affect, while also looking at group differences in these relationships based on depression status. 

```{r}
# set model families
model_families = c('gauss-link', "gauss-log", 'gauss-inv', 'Gamma-link', 'Gamma-log')
# detect cores for later
ncores = detectCores()-1
```

## Positive Affect {- .tabset}

First we run the positive affect models. For each of the recent and remote models, we check the best fitting model as stated in the pre-registration based on the AIC and the residuals. We then check whether mediation analyses are warranted, and run those. Within each tab below, we present the results and steps taken, as well as post-hoc analyses to examine the directionality of the effects. 

### Recent {- }
```{r}
# H1 (PA, recent): current PA predicted by condition x lagged recalled recent
# PA, with demographic covariates and a per-subject random intercept + slope.
h1_post_recent_eq = "pos_mood_cs ~ 1 + condition*rec_mem_pos_mood_cs_lag + gender + age + education + (1 + rec_mem_pos_mood_cs_lag|subjectcode)"

# Fit every candidate family/link in parallel; failed fits are silently
# dropped via .errorhandling = 'remove'.
worker_pool <- makeCluster(ncores)
registerDoParallel(worker_pool)
h1_pos_recent_models <- foreach(fam = model_families, .combine = 'c',
                                .packages = c('lmerTest'),
                                .errorhandling = 'remove') %dopar% {
  fit_all_mods(h1_post_recent_eq, df_medal_clean, fam)
}
stopCluster(worker_pool)

# One table over all fitted variants, for AIC-based model selection.
asis_output(tab_model(h1_pos_recent_models, dv.labels = names(h1_pos_recent_models),
                      show.icc = TRUE, show.aic = TRUE, transform = NULL,
                      show.se = TRUE, digits = 3)$knitr)
```

```{r}
# Inspect the retained model -- index [[2]] of the fitted family list;
# presumably the gaussian log-link fit, confirm against the AIC table above.
summary(h1_pos_recent_models[[2]])
plot_diagnostics(h1_pos_recent_models)
car::vif(h1_pos_recent_models[[2]]) #Vifs were inspected without interaction and deemed OK
```

##### Follow-up {-}

In the follow-up we look at the pairwise differences in the slopes, and refit the model separately within each group.
```{r}
emmeans::emtrends(h1_pos_recent_models[[2]], pairwise ~ condition, var='rec_mem_pos_mood_cs_lag', lmerTest.limit = 3500, pbkrtest.limit = 3500 )
```

```{r}
# Per-condition subsets, reused by the per-group follow-up models below.
df_medal_clean_control   <- df_medal_clean %>% filter(condition == 'control')
df_medal_clean_remitted  <- df_medal_clean %>% filter(condition == 'remitted')
df_medal_clean_depressed <- df_medal_clean %>% filter(condition == 'depressed')

# The recent-PA model refitted separately within each group
# (random intercept and random slope for lagged recalled PA).
m1_pos_rec_control <- lmer(
  pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education +
    (1 + rec_mem_pos_mood_cs_lag | subjectcode),
  data = df_medal_clean_control,
  control = lmerControl(calc.derivs = F, optimizer = 'bobyqa', optCtrl = list(maxfun = 1e6))
)

m1_pos_rec_remitted <- lmer(
  pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education +
    (1 + rec_mem_pos_mood_cs_lag | subjectcode),
  data = df_medal_clean_remitted,
  control = lmerControl(calc.derivs = F, optimizer = 'bobyqa', optCtrl = list(maxfun = 1e6))
)

m1_pos_rec_depressed <- lmer(
  pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education +
    (1 + rec_mem_pos_mood_cs_lag | subjectcode),
  data = df_medal_clean_depressed,
  control = lmerControl(calc.derivs = F, optimizer = 'bobyqa', optCtrl = list(maxfun = 1e6))
)

# Per-group coefficient tables side by side.
asis_output(tab_model(m1_pos_rec_control, m1_pos_rec_remitted, m1_pos_rec_depressed,
                      show.se = T, show.aic = T, transform = NULL,
                      dv.labels = c('control', 'remitted', 'depressed'))$knitr)
```


### Remote {-}
```{r}
# H1 (PA, remote): current PA predicted by condition x lagged recalled remote
# PA, with demographic covariates and a per-subject random intercept + slope.
h1_pos_remote_eq = "pos_mood_cs ~ 1 +  condition*rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode)"

# Fit every candidate family/link in parallel; failed fits are dropped.
worker_pool <- makeCluster(ncores)
registerDoParallel(worker_pool)
h1_pos_remote_models <- foreach(fam = model_families, .combine = 'c',
                                .packages = c('lmerTest'),
                                .errorhandling = 'remove') %dopar% {
  fit_all_mods(h1_pos_remote_eq, df_medal_clean, fam)
}
stopCluster(worker_pool)

# One table over all fitted variants, for AIC-based model selection.
asis_output(tab_model(h1_pos_remote_models, dv.labels = names(h1_pos_remote_models),
                      show.icc = TRUE, show.aic = TRUE, transform = NULL,
                      show.se = TRUE, digits = 3)$knitr)
```

```{r}
# Inspect the retained model -- index [[2]] of the fitted family list;
# confirm against the AIC table above.
summary(h1_pos_remote_models[[2]])
plot_diagnostics(h1_pos_remote_models)
car::vif(h1_pos_remote_models[[2]]) #VIFs without interactions were fine
```


##### Follow-up {-}
```{r}
# Pairwise condition differences in the recalled-remote-PA slope.
# NOTE(review): this uses model [[4]] while the diagnostics above inspect
# [[2]] -- confirm which index is the retained model.
emmeans::emtrends(h1_pos_remote_models[[4]], pairwise ~ condition, var='rem_mem_pos_mood_cs_lag' )

```


```{r}

m1_pos_rem_control <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode), 
                     data=df_medal_clean_control,
                    control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_pos_rem_remitted <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode), 
                     data=df_medal_clean_remitted,
                     control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_pos_rem_depressed <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode), 
                       data=df_medal_clean_depressed,
                       control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m1_pos_rem_control, m1_pos_rem_remitted, m1_pos_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 

```




## Negative Affect {- .tabset}

We next run the negative affect models. For each of the recent and remote models, we check the best fitting model as stated in the pre-registration based on the AIC and the residuals. We then check whether mediation analyses are warranted, and run those. Within each tab below, we present the results and steps taken, as well as post-hoc analyses to examine the directionality of the effects

### Recent {-}
```{r}
# H1 (NA, recent): current NA predicted by condition x lagged recalled recent
# NA, with demographic covariates and a per-subject random intercept + slope.
h1_neg_recent_eq = "neg_mood_cs ~ condition*rec_mem_neg_mood_cs_lag  + gender + age + education + (1 + rec_mem_neg_mood_cs_lag | subjectcode)"

# Fit every candidate family/link in parallel; failed fits are dropped.
worker_pool <- makeCluster(ncores)
registerDoParallel(worker_pool)
h1_neg_recent_models <- foreach(fam = model_families, .combine = 'c',
                                .packages = c('lmerTest'),
                                .errorhandling = 'remove') %dopar% {
  fit_all_mods(h1_neg_recent_eq, df_medal_clean, fam)
}
stopCluster(worker_pool)

# One table over all fitted variants, for AIC-based model selection.
asis_output(tab_model(h1_neg_recent_models, dv.labels = names(h1_neg_recent_models),
                      show.icc = TRUE, show.aic = TRUE, transform = NULL,
                      show.se = TRUE, digits = 3)$knitr)
```


```{r}
# Inspect the retained model -- index [[4]] of the fitted family list;
# confirm against the AIC table above.
summary(h1_neg_recent_models[[4]])
plot_diagnostics(h1_neg_recent_models)
car::vif(h1_neg_recent_models[[4]])
```



#### Follow-up {-}
```{r}
# Pairwise condition differences in the recalled-NA slope of the retained model.
emmeans::emtrends(h1_neg_recent_models[[4]], pairwise ~ condition, var='rec_mem_neg_mood_cs_lag' )

# Estimated marginal means of the same term within each condition.
emmeans::emmeans(h1_neg_recent_models[[4]], identity ~ rec_mem_neg_mood_cs_lag | condition )
```


```{r}

m1_neg_rec_control <- glmer(neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education + (0  + rec_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_control,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rec_remitted <-glmer(neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education + (0  + rec_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_remitted,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rec_depressed <-glmer(neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education + (0  + rec_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_depressed,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m1_neg_rec_control, m1_neg_rec_remitted, m1_neg_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```

### Remote {-}
```{r}
# H1 (NA, remote): current NA predicted by condition x lagged recalled remote
# NA, with demographic covariates and a random slope only (no random intercept).
h1_neg_remote_eq = "neg_mood_cs ~ condition*rem_mem_neg_mood_cs_lag  + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode)"

# Fit every candidate family/link in parallel; failed fits are dropped.
worker_pool <- makeCluster(ncores)
registerDoParallel(worker_pool)
h1_neg_remote_models <- foreach(fam = model_families, .combine = 'c',
                                .packages = c('lmerTest'),
                                .errorhandling = 'remove') %dopar% {
  fit_all_mods(h1_neg_remote_eq, df_medal_clean, fam)
}
stopCluster(worker_pool)

# One table over all fitted variants, for AIC-based model selection.
asis_output(tab_model(h1_neg_remote_models, dv.labels = names(h1_neg_remote_models),
                      show.icc = TRUE, show.aic = TRUE, transform = NULL,
                      show.se = TRUE, digits = 3)$knitr)
```


```{r}
# Inspect the retained model -- index [[2]] of the fitted family list;
# confirm against the AIC table above.
summary(h1_neg_remote_models[[2]])
plot_diagnostics(h1_neg_remote_models)
car::vif(h1_neg_remote_models[[2]])
```


#### Follow-up {-}
```{r}
# Pairwise condition differences in the recalled-remote-NA slope.
emmeans::emtrends(h1_neg_remote_models[[2]], pairwise ~ condition, var='rem_mem_neg_mood_cs_lag' )

```

```{r}

m1_neg_rem_control <-  glmer(neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_control,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rem_remitted <- glmer(neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode),
                   data=df_medal_clean_remitted,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rem_depressed <-  glmer(neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode),
                   data=df_medal_clean_depressed,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m1_neg_rem_control, m1_neg_rem_remitted, m1_neg_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```



# Hypothesis 2: Current Affect Moderation 

For H2, we want to see whether current affective states moderate the ability to recall previous affective states. That is, do current negative affect feelings moderate the relationship between affect and recall? 

```{r}
# Create separate dataframes for each group
df_medal_clean_control <- df_medal_clean %>% filter(condition == 'control')
df_medal_clean_remitted <- df_medal_clean %>% filter(condition == 'remitted')
df_medal_clean_depressed <- df_medal_clean %>% filter(condition == 'depressed')

# Dataframe for the remote variables: drop the participants contributing
# singular datapoints (the %in% form replaces the original chained `==`/`|`
# comparison; identical result, idiomatic R).
df_medal_clean_remote <- df_medal_clean %>%
  filter(!subjectcode %in% c(822, 906, 717))

# Split the remote dataframe by condition as well
df_medal_clean_remote_control <- df_medal_clean_remote %>% filter(condition == 'control')
df_medal_clean_remote_remitted <- df_medal_clean_remote %>% filter(condition == 'remitted')
df_medal_clean_remote_depressed <- df_medal_clean_remote %>% filter(condition == 'depressed')
```

## Positive Affect {- .tabset}

### Recent {-}
An lmer model was fitted with gaussian identity, log & inverse links, as well as Gamma log, inverse, and identity links. These models, however, were not a good fit, and the gaussian identity model suffered from singularity. Hence, a normal linear regression model was fitted. 
```{r fig.height=20, fig.width=15}
# H2 PosAffect-Recent: three-way interaction of condition x recalled recent
# mood x current mood, with random slopes for both within-subject predictors.
h2_pos_recent_eq = "pos_mood_cs ~ condition*rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education + (1 + rec_mem_pos_mood_cs_lag + pos_mood_cs_lag_rec|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# Run in parallel: fit the equation under every candidate family/link.
# NOTE: .errorhandling = "remove" silently drops families that fail to fit.
h2_pos_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_pos_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_pos_recent_models,  dv.labels = names(h2_pos_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)


```

Due to singularity, this was determined to be the best fit for the model.

```{r}
# Inspect the chosen model (index 2 of the family sweep): coefficient summary,
# residual diagnostics, and multicollinearity (VIF).
summary(h2_pos_recent_models[[2]])
plot_diagnostics(h2_pos_recent_models)
car::vif(h2_pos_recent_models[[2]])
```



#### Follow-up {-}
```{r}
# Follow-up: the recent-memory x current-affect interaction model, fitted
# separately within each group. Plain lm() is used here (fixed effects only);
# the shared formula object keeps the three fits identical by construction.
h2_pos_rec_followup_eq <- pos_mood_cs ~ 1 + rec_mem_pos_mood_cs_lag * pos_mood_cs_lag_rec +
  gender + age + education

m2_pos_rec_control <- lm(h2_pos_rec_followup_eq, data = df_medal_clean_control)
m2_pos_rec_remitted <- lm(h2_pos_rec_followup_eq, data = df_medal_clean_remitted)
m2_pos_rec_depressed <- lm(h2_pos_rec_followup_eq, data = df_medal_clean_depressed)

# One table, one column per group
asis_output(tab_model(m2_pos_rec_control, m2_pos_rec_remitted, m2_pos_rec_depressed, show.se = TRUE, show.aic = TRUE, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr)
```


### Remote {- .tabset}

An lmer model was fitted with gaussian identity, log & inverse links, as well as Gamma log, inverse, and identity links. These models, however, were not a good fit, and the gaussian identity model suffered from singularity. Hence, a normal linear regression model was fitted. 
```{r}
# H2 PosAffect-Remote: three-way interaction with a random intercept only.
h2_pos_remote_eq = "pos_mood_cs ~ condition*rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem + gender + age + education + (1 | subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# Run in parallel over all candidate families/links; failed fits are dropped
# silently by .errorhandling = "remove".
h2_pos_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_pos_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_pos_remote_models,  dv.labels = names(h2_pos_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)


```


Due to singularity a regular linear model was fitted.
```{r}
# Inspect the chosen model (index 2): summary, residual diagnostics, VIF.
summary(h2_pos_remote_models[[2]])
plot_diagnostics(h2_pos_remote_models)
car::vif(h2_pos_remote_models[[2]])

```


#### Follow-up {-}
```{r}
# Follow-up: remote-memory x current-affect interaction per group, using the
# remote dataframes (singular-datapoint participants already removed).
h2_pos_rem_followup_eq <- pos_mood_cs ~ rem_mem_pos_mood_cs_lag * pos_mood_cs_lag_rem +
  gender + age + education

m2_pos_rem_control <- lm(h2_pos_rem_followup_eq, data = df_medal_clean_remote_control)
m2_pos_rem_remitted <- lm(h2_pos_rem_followup_eq, data = df_medal_clean_remote_remitted)
m2_pos_rem_depressed <- lm(h2_pos_rem_followup_eq, data = df_medal_clean_remote_depressed)

# One table, one column per group
asis_output(tab_model(m2_pos_rem_control, m2_pos_rem_remitted, m2_pos_rem_depressed, show.se = TRUE, show.aic = TRUE, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```



## Negative Affect {- .tabset}

###  Recent {-}
```{r}
# H2 NegAffect-Recent: three-way interaction with random slopes for both
# within-subject predictors.
h2_neg_recent_eq = "neg_mood_cs ~ condition*rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + (1 + rec_mem_neg_mood_cs_lag + neg_mood_cs_lag_rec|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# Run in parallel over all candidate families/links; failed fits are dropped
# silently by .errorhandling = "remove".
h2_neg_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_neg_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_neg_recent_models, dv.labels = names(h2_neg_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)

```

Check Final Model
```{r }
# Inspect the chosen model - index 4 of the sweep here, unlike the positive-
# affect analyses which use index 2 (a different family/link won).
summary(h2_neg_recent_models[[4]])
plot_diagnostics(h2_neg_recent_models) 
car::vif(h2_neg_recent_models[[4]])

```




#### Follow-up {-}
```{r}
# Follow-up: Gamma-identity GLMMs per group for the recent NegAffect
# interaction.
# NOTE(review): the random effects are split into two terms that BOTH contain
# an intercept - (1 + x | s) + (1 + y | s) - so two uncorrelated random-
# intercept components are estimated per subject. Presumably done to drop the
# slope-slope correlation; confirm this structure is intended.

m2_neg_rec_control <- glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + 
                              gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode), 
                   data=df_medal_clean_control,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rec_remitted <- glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode), 
                   data=df_medal_clean_remitted,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rec_depressed <-  glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode),  
                   data=df_medal_clean_depressed,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

# One table, one column per group
asis_output(tab_model(m2_neg_rec_control, m2_neg_rec_remitted, m2_neg_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```




### Remote {-}
```{r}
# H2 NegAffect-Remote: three-way interaction with random slopes for both
# within-subject predictors.
h2_neg_remote_eq = "neg_mood_cs ~ condition*rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem + gender + age + education + (1 + rem_mem_neg_mood_cs_lag + neg_mood_cs_lag_rem|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# Run in parallel over all candidate families/links; failed fits are dropped
# silently by .errorhandling = "remove".
h2_neg_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_neg_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_neg_remote_models, dv.labels = names(h2_neg_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)

```

Check Final Model
```{r}
# Inspect the chosen model (index 4, as in the recent NegAffect analysis).
summary(h2_neg_remote_models[[4]])
plot_diagnostics(h2_neg_remote_models)
car::vif(h2_neg_remote_models[[4]])
```




#### Follow-up {-}
```{r}
# Follow-up: Gamma-identity GLMMs per group for the remote NegAffect
# interaction, on the remote dataframes.
# NOTE(review): as in the recent follow-up, both random-effects terms include
# an intercept, duplicating the random-intercept component - confirm intended.

m2_neg_rem_control <- glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode), 
                   data=df_medal_clean_remote_control,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rem_remitted <- glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode),
                   data=df_medal_clean_remote_remitted,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rem_depressed <-  glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode), 
                   data=df_medal_clean_remote_depressed,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

# One table, one column per group
asis_output(tab_model(m2_neg_rem_control, m2_neg_rem_remitted, m2_neg_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```


# Exploratory Analyses

## Mediation Models {.tabset}

Following the tests we ran for hypothesis 1, we would also like to see whether or not mediation effects exist. 

### RecPA {-}

```{r}

# Remove the participants with singular datapoints (as for the remote
# analyses) and rows missing the outcome or the mediator - presumably so
# both component models are fitted on the same complete cases; verify.
df_medal_moderation =  df_medal_clean %>% 
  filter (!(subjectcode == 822 | subjectcode == 906 | subjectcode == 717)) %>% 
  filter(!(is.na(pos_mood) | is.na(rec_mem_pos_mood_lag))) 

# Mediator model: does group membership predict recalled recent positive mood?
med_fit_h1_pos_rec <- lme4::lmer(rec_mem_pos_mood_lag ~ condition_dummy +
                                   age + gender + education + 
                                   (1 |subjectcode),
                                 data = df_medal_moderation)
# Outcome (full) model: current positive mood from group + mediator,
# with a random slope for the mediator.
out_fit_h1_pos_rec <-lme4::lmer(pos_mood ~ condition_dummy + rec_mem_pos_mood_lag +
                                  age + gender +education +
                                  (1 + rec_mem_pos_mood_lag |subjectcode), 
                                data = df_medal_moderation)

# Mediation analysis for control and depressed.
# NOTE(review): control.value = 1 / treat.value = 3 presumably encode
# control and depressed in condition_dummy - confirm the coding.
med_pos_rec_dep <- mediation::mediate(med_fit_h1_pos_rec, out_fit_h1_pos_rec,
                                      treat = 'condition_dummy' , control.value = 1, treat.value = 3 ,  mediator = 'rec_mem_pos_mood_lag', 
                                      sims=10000)
summary(med_pos_rec_dep)

```


### RemNA {-}
```{r}
# NOTE(review): this section is headed "RemNA" (remote negative affect), but
# the mediator used throughout is rec_mem_neg_mood_lag - the RECENT recall
# variable - while the data are the remote-filtered dataframe. Confirm
# whether the heading or the variables are wrong.
df_medal_moderation = df_medal_clean_remote %>% filter(!(is.na(neg_mood) | is.na(rec_mem_neg_mood_lag))) 

# Mediator model: group membership -> recalled negative mood
med_fit_h1_neg_rec <- lme4::lmer(rec_mem_neg_mood_lag ~ condition_dummy +
                                   age + gender + education +  
                                   (1 |subjectcode), 
                                 data = df_medal_moderation)
# Outcome model: current negative mood from group + mediator (random slope)
out_fit_h1_neg_rec <- lme4::lmer(neg_mood ~ condition_dummy + rec_mem_neg_mood_lag +
                                  age + gender +education + 
                                  (1 + rec_mem_neg_mood_lag|subjectcode), 
                                data = df_medal_moderation)


# Mediation analysis for control (1) vs remitted (2)
summary(mediation::mediate(med_fit_h1_neg_rec, out_fit_h1_neg_rec, 
                           treat = 'condition_dummy' , mediator = 'rec_mem_neg_mood_lag', control.value = 1, treat.value = 2 ,   
                           sims=10000))

# Mediation analysis for control (1) vs depressed (3)
summary(mediation::mediate(med_fit_h1_neg_rec, out_fit_h1_neg_rec, 
                           treat = 'condition_dummy' ,  mediator = 'rec_mem_neg_mood_lag', control.value = 1, treat.value = 3 ,
                           sims=10000))

```

## Moderated Mediation {.tabset}

### RemPA-CurrPA {-}
```{r}

# Keep only rows with both the outcome and the mediator present.
df_medal_moderation = df_medal_clean_remote %>% 
  filter(!(is.na(pos_mood) | is.na(rem_mem_pos_mood_lag))) 

# Mediator model: group membership -> recalled remote positive mood
med_fit_h2_pos_rem <- lme4::lmer(rem_mem_pos_mood_lag ~ condition_dummy + 
                                   age + gender + education + 
                                   (1 |subjectcode), 
                                 data =df_medal_moderation)
# Outcome model: current positive mood from group + mediator (random slope)
out_fit_h2_pos_rem <-lme4::lmer(pos_mood ~ condition_dummy + rem_mem_pos_mood_lag 
                                + age + gender +education +
                                  (1 + rem_mem_pos_mood_lag |subjectcode),
                                data=df_medal_moderation)


# Mediation analysis for control (1) vs remitted (2).
# NOTE(review): no sims argument here, so mediate() uses its default number
# of simulations - every other mediate() call in this notebook passes
# sims = 10000. Confirm whether that difference is intentional.
med_pos_rem_rem <- mediation::mediate(med_fit_h2_pos_rem, out_fit_h2_pos_rem, 
                                      treat = 'condition_dummy' , control.value = 1, treat.value = 2 ,  mediator = 'rem_mem_pos_mood_lag')
summary(med_pos_rem_rem)

# Pairwise dataframes for the contrast-coded moderated mediation.
# NOTE(review): df_medal_con_dep is created but not used in this chunk (the
# RemNA-CurrNA chunk re-creates it before use).
df_medal_con_rem = df_medal_clean %>% filter(condition=='control' | condition == 'remitted')
df_medal_con_dep = df_medal_clean %>% filter(condition=='control' | condition == 'depressed')

# Moderated mediation for control vs remitted: current mood (Mod) moderating
# the group -> recalled mood -> current mood pathway.
df_medal_con_rem$condition_dummy = build_contrast(df_medal_con_rem$condition, 'control', 'remitted')
med_h2_pos_rem_rem <- mdt_moderated(data = df_medal_con_rem, IV =condition_dummy, DV = pos_mood, M = rem_mem_pos_mood_lag, Mod = pos_mood_lag_rem) 
rbindlist(med_h2_pos_rem_rem$paths, idcol = T)

```

### RemNA - CurrNA {-}
```{r paged.print=TRUE}
# Keep only rows with both the outcome and the mediator present.
df_medal_moderation = df_medal_clean_remote %>% filter(!(is.na(neg_mood) | is.na(rem_mem_neg_mood_lag))) 

# Mediator model: group membership -> recalled remote negative mood
med_fit_h2_neg_rem <- lme4::lmer(rem_mem_neg_mood_lag ~ condition_dummy +
                                   age + gender + education +
                                   (1 |subjectcode),
                                 control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)),
                                 data =df_medal_moderation)

# Outcome model: current negative mood from group + mediator (random slope)
out_fit_h2_neg_rem <-lme4::lmer(neg_mood ~ condition_dummy + rem_mem_neg_mood_lag +
                                  age + gender + education +
                                  (1 + rem_mem_neg_mood_lag |subjectcode), 
                                control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)),
                                data=df_medal_moderation)

# Mediation analysis for control (1) vs depressed (3)
med_neg_rem_dep <- mediation::mediate(med_fit_h2_neg_rem, out_fit_h2_neg_rem,  
                                      mediator = 'rem_mem_neg_mood_lag',  treat = 'condition_dummy' , 
                                      control.value = 1, treat.value = 3 , sims=10000) 
summary(med_neg_rem_dep)

# Mediation analysis for control (1) vs remitted (2)
med_neg_rem_rem <- mediation::mediate(med_fit_h2_neg_rem, out_fit_h2_neg_rem, treat = 'condition_dummy' , control.value = 1, treat.value = 2 ,  mediator = 'rem_mem_neg_mood_lag', sims=10000) 
summary(med_neg_rem_rem)

# Pairwise dataframes (control+remitted, control+depressed) for the moderated
# mediation, which takes a two-level contrast-coded IV.
df_medal_con_rem = df_medal_clean %>% filter(condition=='control' | condition == 'remitted')
df_medal_con_dep = df_medal_clean %>% filter(condition=='control' | condition == 'depressed')

# Moderated mediation, control vs remitted
df_medal_con_rem$condition_dummy = build_contrast(df_medal_con_rem$condition, 'control', 'remitted')
med_h2_neg_rem_rem <- mdt_moderated(data = df_medal_con_rem, IV = condition_dummy, DV = neg_mood, M = rem_mem_neg_mood_lag, Mod = neg_mood_lag_rem) 
rbindlist(med_h2_neg_rem_rem$paths, idcol = T)

# Moderated mediation, control vs depressed
df_medal_con_dep$condition_dummy = build_contrast(df_medal_con_dep$condition, 'control', 'depressed')
med_h2_neg_rem_dep <- mdt_moderated(data = df_medal_con_dep, IV = condition_dummy, DV = neg_mood, M = rem_mem_neg_mood_lag, Mod = neg_mood_lag_rem) 
rbindlist(med_h2_neg_rem_dep$paths,idcol = T)
  
```

## Context

```{r}
# Context check: does location moderate the memory-mood association?
# One random-intercept model per memory type (recent/remote) x valence (PA/NA).
# FIX: the third model previously duplicated the fourth (both used the remote
# predictor rem_mem_neg_mood_cs_lag); it now uses the recent-memory predictor
# rec_mem_neg_mood_cs_lag so all four combinations are covered, mirroring the
# positive-affect pair above.
asis_output(tab_model( (lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)), 
           (lmer(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)),
            (lmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)),
             (lmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)))$knitr)

```

## SRET

In addition to the EMA, participants also completed the SRET. This is a lab based memory bias measure. We want to check whether our memory bias measures from real-life are linked to ones from the lab in this supplementary analysis. 

```{r}

library('haven')

# SRET (lab-based memory bias) scores come from an SPSS file; keep one row per
# subject with the id and bias score (columns 1 and 4; column 4 is
# SRET_BiasGotlib_neg, see the rename below).
sret_file = file.path("Z:/", "inbox", "transfer-2023-12-07-02-15-pm", 'MEDAL_pre and post quest and remote recall_workfile!!_for paper only var.sav')
sret_data = read_sav(sret_file) 
sret_data = sret_data %>% select(c(1,4)) %>% distinct()

# Per-subject correlation between a current mood rating and its recalled
# counterpart. This helper replaces four copy-pasted pipelines; tryCatch()
# returns NA whenever cor() warns (e.g. zero variance within a subject).
# FIX: the two remote frames previously skipped ungroup(), leaving them
# grouped while the recent frames were not; all four are now ungrouped.
subject_mood_corr <- function(df, mood_col, mem_col, label) {
  df %>%
    dplyr::select(subjectcode, dplyr::all_of(c(mood_col, mem_col))) %>%
    na.omit() %>%
    dplyr::group_by(subjectcode) %>%
    dplyr::mutate(corr = tryCatch(cor(.data[[mem_col]], .data[[mood_col]]),
                                  warning = function(e) NA),
                  model_type = label) %>%
    dplyr::select(subjectcode, corr, model_type) %>%
    dplyr::distinct() %>%
    dplyr::ungroup()
}

df_medal_rec_cor1 = subject_mood_corr(df_medal_clean, "pos_mood", "rec_mem_pos_mood_lag", "PA_REC")
df_medal_rec_cor2 = subject_mood_corr(df_medal_clean, "neg_mood", "rec_mem_neg_mood_lag", "NA_REC")
df_medal_rem_cor1 = subject_mood_corr(df_medal_clean, "pos_mood", "rem_mem_pos_mood_lag", "PA_REM")
df_medal_rem_cor2 = subject_mood_corr(df_medal_clean, "neg_mood", "rem_mem_neg_mood_lag", "NA_REM")

df_cors = rbindlist(list(df_medal_rec_cor1, df_medal_rec_cor2, df_medal_rem_cor1, df_medal_rem_cor2))

# Join the EMA correlations to the SRET scores and derive group membership
# from the subject code (< 800 remitted, >= 900 control, else depressed).
df_ref = merge(sret_data, df_cors, by.x = 'subject', by.y = 'subjectcode')
df_ref = df_ref %>% rename(SRET = SRET_BiasGotlib_neg) %>% dplyr::mutate(group = ifelse(subject<800, "remitted", 
                                                                    ifelse(subject>=900, "control", 'depressed')) )

# Does the real-life bias measure (corr) relate to the lab measure (SRET),
# and does that relation differ by group? One regression per memory/valence.
for (mod_type in c("PA_REC", "PA_REM", "NA_REC", "NA_REM")){
  print(mod_type)
  print(summary(lm(SRET ~ corr*group, data = df_ref %>% filter(model_type == mod_type))))
}


```


## Relative Bias

Here we want to check for the relative NA/PA bias. We extract the random effects from the models to get the slopes for each subject, and then compare the NA and PA slopes. This tells us whether there is a bias between negative and positive memory.

```{r}

# Extract the per-subject random slopes from the final H1 models, dropping the
# random intercepts. Columns 3:5 of as.data.frame(ranef(.)) are grp, condval,
# condsd (per lme4's ranef data-frame layout - confirm against lme4 docs).
pos_rec_ref = as.data.frame(ranef(h1_pos_recent_models[[2]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
pos_rem_ref = as.data.frame(ranef(h1_pos_remote_models[[2]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
neg_rec_ref = as.data.frame(ranef(h1_neg_recent_models[[4]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
neg_rem_ref = as.data.frame(ranef(h1_neg_remote_models[[4]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
pos_rec_ref$mod = "PA_REC"
pos_rem_ref$mod = "PA_REM"
neg_rec_ref$mod = "NA_REC"
neg_rem_ref$mod = "NA_REM"

# Stack into one long frame and derive group from the subject code.
# FIX: the remitted cutoff was `sub <= 800`, inconsistent with the SRET
# analysis above which uses `subject < 800`; aligned to `< 800` here.
df_ref = rbindlist(list(pos_rec_ref, pos_rem_ref, neg_rec_ref, neg_rem_ref))
df_ref = df_ref %>% 
  dplyr::mutate(sub = as.numeric(as.character(grp))) %>%
  dplyr::mutate(group = ifelse(sub < 800, "remitted", 
                               ifelse(sub>=900, "control", 'depressed')) )

# Compare NA vs PA random effects within recent and remote memory.
# NOTE(review): these models use `condsd` (the conditional SD of the random
# effect), not `condval` (the subject-level slope estimate), yet the prose
# says "compare the NA/PA slopes" - confirm condsd is really intended.
df_ref_rec = df_ref %>% filter(mod == 'PA_REC' | mod=='NA_REC')
df_ref_rem = df_ref %>% filter(mod == 'PA_REM' | mod=='NA_REM')
summary(lmer(condsd ~ mod*group + (1|sub), data = df_ref_rec))
summary(lmer(condsd ~ mod*group + (1|sub), data = df_ref_rem))
```



# Plots

First we set our theme and fix the data for the figures we want
```{r}
# Shared figure theme: small base text, transparent panel, light gridlines.
ggtheme <- theme(
  text = element_text(size = 8),
  panel.border = element_rect(color = "grey80", fill = "transparent"),
  panel.background = element_rect(fill = "transparent"),
  panel.grid.major = element_line(color = "grey90"),
  panel.grid.minor = element_line(color = "grey100")
)

# Label each observation as below / within / above one SD of the column's
# overall mean. NA inputs propagate to NA labels (nested ifelse semantics).
bin_by_sd <- function(v) {
  center <- mean(v, na.rm = TRUE)
  spread <- sd(v, na.rm = TRUE)
  ifelse(v > center + spread, "+1SD",
         ifelse(v < center - spread, "-1SD", "Mean"))
}

# Add the SD-bin columns used for the interaction plots
df_medal_clean2 <- df_medal_clean %>%
  mutate(pos_mood_sd = bin_by_sd(pos_mood_cs_lag_rec),
         neg_mood_sd = bin_by_sd(neg_mood_cs_lag_rec))

# Relabel the condition for display and order the SD bins low-to-high
df_medal_clean2 <- df_medal_clean2 %>%
  mutate(
    Condition = factor(condition,
                       levels = c('control', 'remitted', 'depressed'),
                       labels = c('Never-Depressed', 'Remitted', 'Depressed')),
    pos_mood_sd = factor(pos_mood_sd, levels = c('-1SD', 'Mean', '+1SD')),
    neg_mood_sd = factor(neg_mood_sd, levels = c('-1SD', 'Mean', '+1SD'))
  )


```

### PA-CUR-REC
```{r}

# Recalled-recent PA vs current PA: one linear fit per CUR-PA bin
# (-1SD / Mean / +1SD), faceted by group; subject-centered values on both axes.
plot_pa_rec = ggplot(df_medal_clean2, aes(x = rec_mem_pos_mood_c_lag, y=pos_mood_c, color = pos_mood_sd, fill = pos_mood_sd)) +
  geom_smooth(method = 'lm', alpha = 0.15) +
  labs( x = bquote(paste(REC[PM], " (subject-centered a.u.)")), y = "PM (subject-centered a.u.)", color = bquote(CUR[PM]), fill = bquote(CUR[PM]) ) +
  ggtheme + 
  facet_grid(.~Condition) 
plot_pa_rec
```

### NA-CUR-REC

```{r}

# Recalled-recent NA vs current NA: same structure as the PA plot above,
# one linear fit per CUR-NA bin, faceted by group.
plot_na_rec = 
  ggplot(df_medal_clean2, aes(x = rec_mem_neg_mood_c_lag, y=neg_mood_c, color = neg_mood_sd, fill = neg_mood_sd)) +
  geom_smooth(method = 'lm', alpha = 0.15) +
  labs( x = bquote(paste(REC['NM'], " (subject-centered a.u.)")), y = "NM (subject-centered a.u.)", color = bquote(CUR["NM"]), fill = bquote(CUR["NM"]) )+
  ggtheme + 
  facet_grid(.~Condition) 
plot_na_rec
```

### Combined Plot for pub
```{r}
# Arrange the PA and NA plots in a 2x2 grid; the NA entries with width 0.05
# appear to act as narrow spacer columns for the A/B panel labels.
# NOTE(review): ggsave() is called without a plot argument, so it saves the
# last displayed plot - confirm it captures the ggarrange output (safer to
# assign the arrangement and pass it explicitly). Only height is set; width
# falls back to the device default.
ggarrange(NA, plot_pa_rec, NA,  plot_na_rec,
          widths=c(0.05, 1, 0.05, 1), ncol = 2, nrow = 2,
          labels=c("A", NA, "B"))
ggsave("figures/figure_2_threewayInteraction.pdf", device = 'pdf', dpi = 320, height = 6)

```


# System Info

```{r}
# Record the R version, platform, and loaded package versions for
# reproducibility of this notebook.
sessionInfo()

```




